[37/51] [abbrv] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, HDFS-7435 and HDFS-7930 (this commit is for HDFS-7930 only)

2015-04-06 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, 
HDFS-7435 and HDFS-7930 (this commit is for HDFS-7930 only)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37686c5d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37686c5d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37686c5d

Branch: refs/heads/HDFS-7285
Commit: 37686c5dfbbcd7e7feaca56829eb946d132f0df2
Parents: a129dda
Author: Zhe Zhang z...@apache.org
Authored: Mon Mar 23 11:25:40 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:01 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java  | 7 ---
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 7 ---
 .../org/apache/hadoop/hdfs/server/namenode/INodeFile.java | 2 +-
 3 files changed, 9 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37686c5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 291347b..a7a7a45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2114,17 +2114,18 @@ public class BlockManager {
* Mark block replicas as corrupt except those on the storages in 
* newStorages list.
*/
-  public void markBlockReplicasAsCorrupt(BlockInfoContiguous block, 
+  public void markBlockReplicasAsCorrupt(Block oldBlock,
+  BlockInfo block,
   long oldGenerationStamp, long oldNumBytes, 
   DatanodeStorageInfo[] newStorages) throws IOException {
 assert namesystem.hasWriteLock();
 BlockToMarkCorrupt b = null;
 if (block.getGenerationStamp() != oldGenerationStamp) {
-  b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+  b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
  "genstamp does not match " + oldGenerationStamp
  + " : " + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
 } else if (block.getNumBytes() != oldNumBytes) {
-  b = new BlockToMarkCorrupt(block,
+  b = new BlockToMarkCorrupt(oldBlock, block,
  "length does not match " + oldNumBytes
  + " : " + block.getNumBytes(), Reason.SIZE_MISMATCH);
 } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37686c5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 777a084..3fec364 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2795,7 +2795,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   /** Compute quota change for converting a complete block to a UC block */
   private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
 final QuotaCounts delta = new QuotaCounts.Builder().build();
-final BlockInfoContiguous lastBlock = file.getLastBlock();
+final BlockInfo lastBlock = file.getLastBlock();
 if (lastBlock != null) {
   final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
   final short repl = file.getBlockReplication();
@@ -4359,8 +4359,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 } else {
   iFile.convertLastBlockToUC(storedBlock, trimmedStorageInfos);
   if (closeFile) {
-blockManager.markBlockReplicasAsCorrupt(storedBlock,
-oldGenerationStamp, oldNumBytes, trimmedStorageInfos);
+blockManager.markBlockReplicasAsCorrupt(oldBlock.getLocalBlock(),
+storedBlock, oldGenerationStamp, oldNumBytes,
+trimmedStorageInfos);
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37686c5d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 

[13/51] [abbrv] hadoop git commit: HADOOP-11541. Raw XOR coder

2015-04-06 Thread zhz
HADOOP-11541. Raw XOR coder


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f50b3df5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f50b3df5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f50b3df5

Branch: refs/heads/HDFS-7285
Commit: f50b3df51ffce1ce0d67b9601840ce0fdcb7e750
Parents: b431203
Author: Kai Zheng dran...@apache.org
Authored: Sun Feb 8 01:40:27 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:01:05 2015 -0700

--
 .../io/erasurecode/rawcoder/XorRawDecoder.java  |  81 ++
 .../io/erasurecode/rawcoder/XorRawEncoder.java  |  61 +
 .../hadoop/io/erasurecode/TestCoderBase.java| 262 +++
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  96 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   |  52 
 5 files changed, 552 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f50b3df5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
new file mode 100644
index 000..98307a7
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawDecoder.java
@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw decoder in XOR code scheme in pure Java, adapted from HDFS-RAID.
+ */
+public class XorRawDecoder extends AbstractRawErasureDecoder {
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+  ByteBuffer[] outputs) {
+assert(erasedIndexes.length == outputs.length);
+assert(erasedIndexes.length <= 1);
+
+int bufSize = inputs[0].remaining();
+int erasedIdx = erasedIndexes[0];
+
+// Set the output to zeros.
+for (int j = 0; j < bufSize; j++) {
+  outputs[0].put(j, (byte) 0);
+}
+
+// Process the inputs.
+for (int i = 0; i < inputs.length; i++) {
+  // Skip the erased location.
+  if (i == erasedIdx) {
+continue;
+  }
+
+  for (int j = 0; j < bufSize; j++) {
+outputs[0].put(j, (byte) (outputs[0].get(j) ^ inputs[i].get(j)));
+  }
+}
+  }
+
+  @Override
+  protected void doDecode(byte[][] inputs, int[] erasedIndexes,
+  byte[][] outputs) {
+assert(erasedIndexes.length == outputs.length);
+assert(erasedIndexes.length <= 1);
+
+int bufSize = inputs[0].length;
+int erasedIdx = erasedIndexes[0];
+
+// Set the output to zeros.
+for (int j = 0; j < bufSize; j++) {
+  outputs[0][j] = 0;
+}
+
+// Process the inputs.
+for (int i = 0; i < inputs.length; i++) {
+  // Skip the erased location.
+  if (i == erasedIdx) {
+continue;
+  }
+
+  for (int j = 0; j < bufSize; j++) {
+outputs[0][j] ^= inputs[i][j];
+  }
+}
+  }
+
+}
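
The decoder above relies on the basic XOR identity: with parity p = d0 ^ d1 ^ ... ^ d(k-1), any single erased unit equals the XOR of all surviving units. A minimal standalone sketch of that property (illustrative only, not part of the commit):

// Hypothetical demo class, not Hadoop code: recover one erased unit
// from the XOR of the surviving units.
public class XorRecoveryDemo {
  public static void main(String[] args) {
    byte[] d0 = {1, 2, 3};
    byte[] d1 = {4, 5, 6};
    byte[] parity = new byte[3];
    for (int j = 0; j < 3; j++) {
      parity[j] = (byte) (d0[j] ^ d1[j]);        // encode: p = d0 ^ d1
    }
    byte[] recovered = new byte[3];
    for (int j = 0; j < 3; j++) {
      recovered[j] = (byte) (d0[j] ^ parity[j]); // decode: d1 = d0 ^ p
    }
    System.out.println(java.util.Arrays.equals(recovered, d1)); // true
  }
}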

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f50b3df5/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
new file mode 100644
index 000..99b20b9
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/XorRawEncoder.java
@@ -0,0 +1,61 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE 

[23/51] [abbrv] hadoop git commit: HDFS-7872. Erasure Coding: INodeFile.dumpTreeRecursively() supports to print striped blocks. Contributed by Takuya Fukudome.

2015-04-06 Thread zhz
HDFS-7872. Erasure Coding: INodeFile.dumpTreeRecursively() supports to print 
striped blocks. Contributed by Takuya Fukudome.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffe990b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffe990b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffe990b6

Branch: refs/heads/HDFS-7285
Commit: ffe990b622bea084f89c94663af9c192e4fb0303
Parents: d0de9b4
Author: Jing Zhao ji...@apache.org
Authored: Thu Mar 5 16:44:38 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:20:57 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java   | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffe990b6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 093ba25..22d61bc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -877,8 +877,8 @@ public class INodeFile extends INodeWithAdditionalFields
 out.print(", fileSize=" + computeFileSize(snapshotId));
 // only compare the first block
 out.print(", blocks=");
-out.print(blocks == null || blocks.length == 0? null: blocks[0]);
-// TODO print striped blocks
+BlockInfo[] blks = getBlocks();
+out.print(blks == null || blks.length == 0? null: blks[0]);
 out.println();
   }
 



[45/51] [abbrv] hadoop git commit: HDFS-7907. Erasure Coding: track invalid, corrupt, and under-recovery striped blocks in NameNode. Contributed by Jing Zhao.

2015-04-06 Thread zhz
HDFS-7907. Erasure Coding: track invalid, corrupt, and under-recovery striped 
blocks in NameNode. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0767bd70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0767bd70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0767bd70

Branch: refs/heads/HDFS-7285
Commit: 0767bd70c1f987c27bfa184735be185ce937ffe1
Parents: 5e3f732
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 30 11:25:09 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:03 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  25 ++-
 .../server/blockmanagement/BlockManager.java| 203 ++-
 .../blockmanagement/DecommissionManager.java|  86 
 .../hdfs/server/namenode/FSNamesystem.java  |   8 +-
 .../server/blockmanagement/TestNodeCount.java   |   2 +-
 .../TestOverReplicatedBlocks.java   |   4 +-
 6 files changed, 172 insertions(+), 156 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0767bd70/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 30b5ee7..4a85efb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -18,11 +18,13 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+
 import java.io.DataOutput;
 import java.io.IOException;
 
+import static 
org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE;
+
 /**
  * Subclass of {@link BlockInfo}, presenting a block group in erasure coding.
  *
@@ -37,7 +39,6 @@ import java.io.IOException;
  * array to record the block index for each triplet.
  */
 public class BlockInfoStriped extends BlockInfo {
-  private final int   chunkSize = HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE;
   private final short dataBlockNum;
   private final short parityBlockNum;
   /**
@@ -132,6 +133,22 @@ public class BlockInfoStriped extends BlockInfo {
 return i == -1 ? -1 : indices[i];
   }
 
+  /**
+   * Identify the block stored in the given datanode storage. Note that
+   * the returned block has the same block Id with the one seen/reported by the
+   * DataNode.
+   */
+  Block getBlockOnStorage(DatanodeStorageInfo storage) {
+int index = getStorageBlockIndex(storage);
+if (index < 0) {
+  return null;
+} else {
+  Block block = new Block(this);
+  block.setBlockId(this.getBlockId() + index);
+  return block;
+}
+  }
+
   @Override
   boolean removeStorage(DatanodeStorageInfo storage) {
 int dnIndex = findStorageInfoFromEnd(storage);
@@ -186,8 +203,8 @@ public class BlockInfoStriped extends BlockInfo {
 // In case striped blocks, total usage by this striped blocks should
 // be the total of data blocks and parity blocks because
 // `getNumBytes` is the total of actual data block size.
-return ((getNumBytes() - 1) / (dataBlockNum * chunkSize) + 1)
-* chunkSize * parityBlockNum + getNumBytes();
+return ((getNumBytes() - 1) / (dataBlockNum * BLOCK_STRIPED_CHUNK_SIZE) + 1)
+* BLOCK_STRIPED_CHUNK_SIZE * parityBlockNum + getNumBytes();
   }
 
   @Override
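
A quick numeric check of the spaceConsumed() formula above (hypothetical values; the 6-data/2-parity layout is only assumed for illustration):

// Hypothetical demo, not Hadoop code: parity is charged per full stripe,
// i.e. ceil(numBytes / (dataBlockNum * chunkSize)) stripes of parity chunks.
public class StripedSpaceDemo {
  public static void main(String[] args) {
    long numBytes = 1L << 20;                  // 1 MiB of actual data
    int dataBlockNum = 6, parityBlockNum = 2;  // assumed layout
    int chunkSize = 64 * 1024;                 // BLOCK_STRIPED_CHUNK_SIZE
    long stripes = (numBytes - 1) / ((long) dataBlockNum * chunkSize) + 1;
    long consumed = stripes * chunkSize * parityBlockNum + numBytes;
    System.out.println(stripes + " stripes, " + consumed + " bytes charged");
    // 3 stripes; 3 * 64 KiB * 2 = 384 KiB of parity on top of the 1 MiB
  }
}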

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0767bd70/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 66a02d4..e05330c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -177,7 +177,11 @@ public class BlockManager {
  /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
 
-  

[46/51] [abbrv] hadoop git commit: HDFS-7839. Erasure coding: implement facilities in NameNode to create and manage EC zones. Contributed by Zhe Zhang

2015-04-06 Thread zhz
HDFS-7839. Erasure coding: implement facilities in NameNode to create and 
manage EC zones. Contributed by Zhe Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea91d07e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea91d07e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea91d07e

Branch: refs/heads/HDFS-7285
Commit: ea91d07e417e0f678954f1f694d2176b6aebe682
Parents: a4a4032
Author: Zhe Zhang z...@apache.org
Authored: Thu Apr 2 22:38:29 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:04 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  15 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java|   8 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 -
 ...tNamenodeProtocolServerSideTranslatorPB.java |  14 ++
 .../ClientNamenodeProtocolTranslatorPB.java |  16 ++
 .../BlockStoragePolicySuite.java|   5 -
 .../hdfs/server/common/HdfsServerConstants.java |   2 +
 .../namenode/ErasureCodingZoneManager.java  | 112 ++
 .../hdfs/server/namenode/FSDirRenameOp.java |   2 +
 .../hdfs/server/namenode/FSDirectory.java   |  26 +++-
 .../hdfs/server/namenode/FSNamesystem.java  |  40 +
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  10 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  16 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |   9 ++
 .../hadoop/hdfs/TestBlockStoragePolicy.java |  12 +-
 .../hadoop/hdfs/TestErasureCodingZones.java | 151 +++
 .../TestBlockInitialEncoding.java   |  75 -
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../server/namenode/TestFSEditLogLoader.java|   6 +-
 .../hdfs/server/namenode/TestFSImage.java   |  23 ++-
 .../namenode/TestRecoverStripedBlocks.java  |   7 +-
 21 files changed, 431 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea91d07e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 29bb604..6a82160 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3320,6 +3320,21 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
+  public void createErasureCodingZone(String src)
+  throws IOException {
+checkOpen();
+TraceScope scope = getPathTraceScope("createErasureCodingZone", src);
+try {
+  namenode.createErasureCodingZone(src);
+} catch (RemoteException re) {
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class,
+  UnresolvedPathException.class);
+} finally {
+  scope.close();
+}
+  }
+
   public void setXAttr(String src, String name, byte[] value, 
  EnumSet<XAttrSetFlag> flag) throws IOException {
 checkOpen();
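
For orientation, a hypothetical caller of the new client method (illustrative sketch only; the surrounding setup is assumed and is not part of this commit):

// Hypothetical usage, not from the patch: create a directory and mark it
// as an erasure coding zone via the DFSClient API added above.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class EcZoneExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    DFSClient client = new DFSClient(NameNode.getAddress(conf), conf);
    client.mkdirs("/ecZone", new FsPermission((short) 0755), true);
    // Files created under the zone use the striped layout
    // (schema is hardcoded at this stage, per the TODO for HDFS-7337).
    client.createErasureCodingZone("/ecZone");
    client.close();
  }
}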

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea91d07e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index bafb02b..8efe344 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1363,6 +1363,14 @@ public interface ClientProtocol {
   long prevId) throws IOException;
 
   /**
+   * Create an erasure coding zone (currently with hardcoded schema)
+   * TODO: Configurable and pluggable schemas (HDFS-7337)
+   */
+  @Idempotent
+  public void createErasureCodingZone(String src)
+  throws IOException;
+
+  /**
* Set xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ea91d07e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 

[48/51] [abbrv] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging (this commit is for conflicts from HDFS-6945). Contributed by Zhe Zhang.

2015-04-06 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging (this 
commit is for conflicts from HDFS-6945). Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4a40322
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4a40322
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4a40322

Branch: refs/heads/HDFS-7285
Commit: a4a403226b8b61b063a5b18b61ab1f383f2d7ac9
Parents: 0800245
Author: Zhe Zhang z...@apache.org
Authored: Thu Apr 2 11:25:58 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:04 2015 -0700

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4a40322/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f42adcd..bce789a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3667,7 +3667,7 @@ public class BlockManager {
   private void removeFromExcessReplicateMap(Block block) {
 for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
   String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
-  LightWeightLinkedSet<Block> excessReplicas = 
excessReplicateMap.get(uuid);
+  LightWeightLinkedSet<BlockInfo> excessReplicas = 
excessReplicateMap.get(uuid);
   if (excessReplicas != null) {
 if (excessReplicas.remove(block)) {
   excessBlocksCount.decrementAndGet();



[19/51] [abbrv] hadoop git commit: HDFS-7749. Erasure Coding: Add striped block support in INodeFile. Contributed by Jing Zhao.

2015-04-06 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/565cef08/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
new file mode 100644
index 000..47445be
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileWithStripedBlocksFeature.java
@@ -0,0 +1,112 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+
+/**
+ * Feature for file with striped blocks
+ */
+class FileWithStripedBlocksFeature implements INode.Feature {
+  private BlockInfoStriped[] blocks;
+
+  FileWithStripedBlocksFeature() {
+blocks = new BlockInfoStriped[0];
+  }
+
+  FileWithStripedBlocksFeature(BlockInfoStriped[] blocks) {
+Preconditions.checkArgument(blocks != null);
+this.blocks = blocks;
+  }
+
+  BlockInfoStriped[] getBlocks() {
+return this.blocks;
+  }
+
+  void setBlock(int index, BlockInfoStriped blk) {
+blocks[index] = blk;
+  }
+
+  BlockInfoStriped getLastBlock() {
+return blocks == null || blocks.length == 0 ?
+null : blocks[blocks.length - 1];
+  }
+
+  int numBlocks() {
+return blocks == null ? 0 : blocks.length;
+  }
+
+  void updateBlockCollection(INodeFile file) {
+if (blocks != null) {
+  for (BlockInfoStriped blk : blocks) {
+blk.setBlockCollection(file);
+  }
+}
+  }
+
+  private void setBlocks(BlockInfoStriped[] blocks) {
+this.blocks = blocks;
+  }
+
+  void addBlock(BlockInfoStriped newBlock) {
+if (this.blocks == null) {
+  this.setBlocks(new BlockInfoStriped[]{newBlock});
+} else {
+  int size = this.blocks.length;
+  BlockInfoStriped[] newlist = new BlockInfoStriped[size + 1];
+  System.arraycopy(this.blocks, 0, newlist, 0, size);
+  newlist[size] = newBlock;
+  this.setBlocks(newlist);
+}
+  }
+
+  boolean removeLastBlock(Block oldblock) {
+if (blocks == null || blocks.length == 0) {
+  return false;
+}
+int newSize = blocks.length - 1;
+if (!blocks[newSize].equals(oldblock)) {
+  return false;
+}
+
+//copy to a new list
+BlockInfoStriped[] newlist = new BlockInfoStriped[newSize];
+System.arraycopy(blocks, 0, newlist, 0, newSize);
+setBlocks(newlist);
+return true;
+  }
+
+  void truncateStripedBlocks(int n) {
+final BlockInfoStriped[] newBlocks;
+if (n == 0) {
+  newBlocks = new BlockInfoStriped[0];
+} else {
+  newBlocks = new BlockInfoStriped[n];
+  System.arraycopy(getBlocks(), 0, newBlocks, 0, n);
+}
+// set new blocks
+setBlocks(newBlocks);
+  }
+
+  void clear() {
+this.blocks = null;
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/565cef08/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index b09463d..5462da7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import static 
org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite.ID_UNSPECIFIED;
+import static 

[26/51] [abbrv] hadoop git commit: HADOOP-11646. Erasure Coder API for encoding and decoding of block group ( Contributed by Kai Zheng )

2015-04-06 Thread zhz
HADOOP-11646. Erasure Coder API for encoding and decoding of block group ( 
Contributed by Kai Zheng )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c8d4c251
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c8d4c251
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c8d4c251

Branch: refs/heads/HDFS-7285
Commit: c8d4c25153b26b57e916261d63936a0de2aa8b14
Parents: ffe990b
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Mar 9 12:32:26 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:20:58 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   2 +
 .../apache/hadoop/io/erasurecode/ECBlock.java   |  80 ++
 .../hadoop/io/erasurecode/ECBlockGroup.java |  82 ++
 .../erasurecode/coder/AbstractErasureCoder.java |  63 +
 .../coder/AbstractErasureCodingStep.java|  59 
 .../coder/AbstractErasureDecoder.java   | 152 +++
 .../coder/AbstractErasureEncoder.java   |  50 
 .../io/erasurecode/coder/ErasureCoder.java  |  77 ++
 .../io/erasurecode/coder/ErasureCodingStep.java |  55 
 .../io/erasurecode/coder/ErasureDecoder.java|  41 +++
 .../erasurecode/coder/ErasureDecodingStep.java  |  52 
 .../io/erasurecode/coder/ErasureEncoder.java|  39 +++
 .../erasurecode/coder/ErasureEncodingStep.java  |  49 
 .../io/erasurecode/coder/XorErasureDecoder.java |  78 ++
 .../io/erasurecode/coder/XorErasureEncoder.java |  45 
 .../erasurecode/rawcoder/RawErasureCoder.java   |   2 +-
 .../erasurecode/coder/TestErasureCoderBase.java | 266 +++
 .../io/erasurecode/coder/TestXorCoder.java  |  50 
 18 files changed, 1241 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8d4c251/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index ee42c84..c17a1bd 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -15,4 +15,6 @@
 HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai 
Zheng
 ( Kai Zheng )
 
+HADOOP-11646. Erasure Coder API for encoding and decoding of block group
+( Kai Zheng via vinayakumarb )
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c8d4c251/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
new file mode 100644
index 000..956954a
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlock.java
@@ -0,0 +1,80 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+/**
+ * A wrapper of block level data source/output that {@link ECChunk}s can be
+ * extracted from. For HDFS, it can be an HDFS block (250MB). Note it only 
cares
+ * about erasure coding specific logic thus avoids coupling with any HDFS block
+ * details. We can have something like HdfsBlock extend it.
+ */
+public class ECBlock {
+
+  private boolean isParity;
+  private boolean isErased;
+
+  /**
+   * A default constructor. isParity and isErased are false by default.
+   */
+  public ECBlock() {
+this(false, false);
+  }
+
+  /**
+   * A constructor specifying isParity and isErased.
+   * @param isParity
+   * @param isErased
+   */
+  public ECBlock(boolean isParity, boolean isErased) {
+this.isParity = isParity;
+this.isErased = isErased;
+  }
+
+  /**
+   * Set true if it's for a parity block.
+   * @param isParity
+   */

[35/51] [abbrv] hadoop git commit: HDFS-7864. Erasure Coding: Update safemode calculation for striped blocks. Contributed by GAO Rui.

2015-04-06 Thread zhz
HDFS-7864. Erasure Coding: Update safemode calculation for striped blocks. 
Contributed by GAO Rui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7c7ba714
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7c7ba714
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7c7ba714

Branch: refs/heads/HDFS-7285
Commit: 7c7ba714b047c20815ebe2b35083d5f664e7d972
Parents: 37686c5
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 23 15:06:53 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:01 2015 -0700

--
 .../server/blockmanagement/BlockIdManager.java |  6 ++
 .../hdfs/server/blockmanagement/BlockManager.java  | 12 +++-
 .../hdfs/server/blockmanagement/BlocksMap.java |  2 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 17 -
 .../hadoop/hdfs/server/namenode/SafeMode.java  |  5 +++--
 .../java/org/apache/hadoop/hdfs/TestSafeMode.java  | 15 +--
 6 files changed, 42 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c7ba714/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 1d69d74..187f8c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -233,6 +233,12 @@ public class BlockIdManager {
 return id < 0;
   }
 
+  /**
+   * The last 4 bits of HdfsConstants.BLOCK_GROUP_INDEX_MASK(15) is 1111,
+   * so the last 4 bits of (~HdfsConstants.BLOCK_GROUP_INDEX_MASK) is 0000
+   * and the other 60 bits are 1. Group ID is the first 60 bits of any
+   * data/parity block id in the same striped block group.
+   */
   public static long convertToStripedID(long id) {
 return id & (~HdfsConstants.BLOCK_GROUP_INDEX_MASK);
   }
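
To make the bit arithmetic concrete, a small sketch (hypothetical values, not part of the commit): the low 4 bits of a striped block ID carry the index within its group, so clearing them recovers the group ID.

// Hypothetical demo, not Hadoop code: group ID vs. per-block ID arithmetic.
public class StripedIdDemo {
  public static void main(String[] args) {
    long mask = 15;            // HdfsConstants.BLOCK_GROUP_INDEX_MASK
    long groupId = -1024L;     // hypothetical group ID, low 4 bits are 0
    long blockId = groupId + 3; // ID seen by the DataNode for block index 3
    System.out.println((blockId & ~mask) == groupId); // true: what
    // convertToStripedID computes to map a reported ID back to its group
  }
}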

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7c7ba714/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a7a7a45..2f3c87c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -684,8 +684,10 @@ public class BlockManager {
 // a forced completion when a file is getting closed by an
 // OP_CLOSE edit on the standby).
 namesystem.adjustSafeModeBlockTotals(0, 1);
+final int minStorage = curBlock.isStriped() ?
+((BlockInfoStriped) curBlock).getDataBlockNum() : minReplication;
 namesystem.incrementSafeBlockCount(
-Math.min(numNodes, minReplication));
+Math.min(numNodes, minStorage), curBlock);
 
 // replace block in the blocksMap
 return blocksMap.replaceBlock(completeBlock);
@@ -2208,7 +2210,7 @@ public class BlockManager {
 // refer HDFS-5283
 if (namesystem.isInSnapshot(storedBlock.getBlockCollection())) {
   int numOfReplicas = BlockInfo.getNumExpectedLocations(storedBlock);
-  namesystem.incrementSafeBlockCount(numOfReplicas);
+  namesystem.incrementSafeBlockCount(numOfReplicas, storedBlock);
 }
 //and fall through to next clause
   }  
@@ -2589,14 +2591,14 @@ public class BlockManager {
   // only complete blocks are counted towards that.
   // In the case that the block just became complete above, completeBlock()
   // handles the safe block count maintenance.
-  namesystem.incrementSafeBlockCount(numCurrentReplica);
+  namesystem.incrementSafeBlockCount(numCurrentReplica, storedBlock);
 }
   }
 
   /**
* Modify (block--datanode) map. Remove block from set of
* needed replications if this takes care of the problem.
-   * @return the block that is stored in blockMap.
+   * @return the block that is stored in blocksMap.
*/
   private Block addStoredBlock(final BlockInfo block,
final Block reportedBlock,
@@ -2665,7 +2667,7 @@ public class BlockManager {
 

[31/51] [abbrv] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt accordingly

2015-04-06 Thread zhz
Updated CHANGES-HDFS-EC-7285.txt accordingly


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08a4c9e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08a4c9e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08a4c9e2

Branch: refs/heads/HDFS-7285
Commit: 08a4c9e2c32d25942e438ba005db26b64d00b5b4
Parents: 37b917d
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Mar 18 19:24:24 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:00 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08a4c9e2/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index a97dc34..e27ff5c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -19,6 +19,9 @@
 ( Kai Zheng via vinayakumarb )
 
 HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng
-( Kai Zheng )
+( Kai Zheng )
+
+HADOOP-11706. Refine a little bit erasure coder API. Contributed by Kai 
Zheng
+( Kai Zheng )
 
 



[50/51] [abbrv] hadoop git commit: HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by Zhe Zhang.

2015-04-06 Thread zhz
HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by 
Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bdb67d96
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bdb67d96
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bdb67d96

Branch: refs/heads/HDFS-7285
Commit: bdb67d96fbb0f23679f32e419bb17a7f8684411e
Parents: ea91d07
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 3 15:22:50 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:05 2015 -0700

--
 .../coder/AbstractErasureDecoder.java   |  7 ++--
 .../coder/AbstractErasureEncoder.java   |  7 ++--
 .../io/erasurecode/coder/ErasureCoder.java  | 12 ++
 .../io/erasurecode/coder/ErasureDecoder.java| 41 
 .../io/erasurecode/coder/ErasureEncoder.java| 39 ---
 .../erasurecode/coder/TestErasureCoderBase.java | 20 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt| 14 ++-
 7 files changed, 41 insertions(+), 99 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdb67d96/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
index 54a6d1e..cd31294 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
@@ -23,13 +23,12 @@ import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 /**
  * An abstract erasure decoder that's to be inherited by new decoders.
  *
- * It implements the {@link ErasureDecoder} interface.
+ * It implements the {@link ErasureCoder} interface.
  */
-public abstract class AbstractErasureDecoder extends AbstractErasureCoder
-implements ErasureDecoder {
+public abstract class AbstractErasureDecoder extends AbstractErasureCoder {
 
   @Override
-  public ErasureCodingStep decode(ECBlockGroup blockGroup) {
+  public ErasureCodingStep calculateCoding(ECBlockGroup blockGroup) {
 // We may have more than this when considering complicate cases. 
HADOOP-11550
 return prepareDecodingStep(blockGroup);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdb67d96/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
index 09b31e5..a836b75 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
@@ -23,13 +23,12 @@ import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 /**
  * An abstract erasure encoder that's to be inherited by new encoders.
  *
- * It implements the {@link ErasureEncoder} interface.
+ * It implements the {@link ErasureCoder} interface.
  */
-public abstract class AbstractErasureEncoder extends AbstractErasureCoder
-implements ErasureEncoder {
+public abstract class AbstractErasureEncoder extends AbstractErasureCoder {
 
   @Override
-  public ErasureCodingStep encode(ECBlockGroup blockGroup) {
+  public ErasureCodingStep calculateCoding(ECBlockGroup blockGroup) {
 // We may have more than this when considering complicate cases. 
HADOOP-11550
 return prepareEncodingStep(blockGroup);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bdb67d96/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
index c5922f3..fb90156 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
+++ 

[20/51] [abbrv] hadoop git commit: HDFS-7749. Erasure Coding: Add striped block support in INodeFile. Contributed by Jing Zhao.

2015-04-06 Thread zhz
HDFS-7749. Erasure Coding: Add striped block support in INodeFile. Contributed 
by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/565cef08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/565cef08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/565cef08

Branch: refs/heads/HDFS-7285
Commit: 565cef0818471d59d16c5540d06b279cd9dc3e1f
Parents: ae79047
Author: Jing Zhao ji...@apache.org
Authored: Wed Feb 25 22:10:26 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:19:29 2015 -0700

--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  17 ++
 .../server/blockmanagement/BlockCollection.java |  13 +-
 .../hdfs/server/blockmanagement/BlockInfo.java  |  88 ++-
 .../BlockInfoContiguousUnderConstruction.java   |   6 +-
 .../blockmanagement/BlockInfoStriped.java   |  31 +++
 .../BlockInfoStripedUnderConstruction.java  | 240 ++
 .../server/blockmanagement/BlockManager.java| 151 +--
 .../CacheReplicationMonitor.java|  16 +-
 .../hdfs/server/namenode/FSDirConcatOp.java |   8 +-
 .../hdfs/server/namenode/FSDirectory.java   |   5 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |   8 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  16 +-
 .../hdfs/server/namenode/FSImageFormat.java |   7 +-
 .../server/namenode/FSImageFormatPBINode.java   |  46 +++-
 .../hdfs/server/namenode/FSNamesystem.java  | 110 
 .../namenode/FileUnderConstructionFeature.java  |  15 +-
 .../namenode/FileWithStripedBlocksFeature.java  | 112 
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 254 +--
 .../hdfs/server/namenode/LeaseManager.java  |   6 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |   4 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |   3 +-
 .../snapshot/FSImageFormatPBSnapshot.java   |   7 +-
 .../server/namenode/snapshot/FileDiffList.java  |   9 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   5 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |  10 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   3 +-
 .../blockmanagement/TestReplicationPolicy.java  |   4 +-
 .../hdfs/server/namenode/TestAddBlock.java  |  12 +-
 .../hdfs/server/namenode/TestAddBlockgroup.java |   3 +-
 .../namenode/TestBlockUnderConstruction.java|   6 +-
 .../hdfs/server/namenode/TestFSImage.java   |   4 +-
 .../hdfs/server/namenode/TestFileTruncate.java  |   4 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   4 +-
 .../snapshot/TestSnapshotBlocksMap.java |  24 +-
 .../namenode/snapshot/TestSnapshotDeletion.java |  16 +-
 35 files changed, 953 insertions(+), 314 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/565cef08/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 1942ea9..14410e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -172,6 +172,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageReportProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypeProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageTypesProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StorageUuidsProto;
+import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.StripedBlockProto;
 import org.apache.hadoop.hdfs.protocol.proto.InotifyProtos;
 import 
org.apache.hadoop.hdfs.protocol.proto.JournalProtocolProtos.JournalInfoProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.XAttrProtos.GetXAttrsResponseProto;
@@ -184,6 +185,7 @@ import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
 import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
@@ -430,6 +432,21 @@ public class PBHelper {
 return new Block(b.getBlockId(), b.getNumBytes(), b.getGenStamp());
   }
 
+  public static BlockInfoStriped 

[41/51] [abbrv] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, HDFS-7435, HDFS-7930, HDFS-7960 (this commit is for HDFS-7960)

2015-04-06 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, 
HDFS-7435, HDFS-7930, HDFS-7960 (this commit is for HDFS-7960)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4e62629
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4e62629
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4e62629

Branch: refs/heads/HDFS-7285
Commit: a4e626296989ed49d72f611fce30381298040225
Parents: f62c711
Author: Zhe Zhang z...@apache.org
Authored: Tue Mar 24 11:39:36 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:02 2015 -0700

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 4 ++--
 .../blockmanagement/TestNameNodePrunesMissingStorages.java  | 5 -
 .../hadoop/hdfs/server/namenode/TestAddStripedBlocks.java   | 2 +-
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4e62629/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 2f3c87c..66a02d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1975,10 +1975,10 @@ public class BlockManager {
"longer exists on the DataNode.",
   Long.toHexString(context.getReportId()), zombie.getStorageID());
 assert(namesystem.hasWriteLock());
-Iterator<BlockInfoContiguous> iter = zombie.getBlockIterator();
+Iterator<BlockInfo> iter = zombie.getBlockIterator();
 int prevBlocks = zombie.numBlocks();
 while (iter.hasNext()) {
-  BlockInfoContiguous block = iter.next();
+  BlockInfo block = iter.next();
   // We assume that a block can be on only one storage in a DataNode.
   // That's why we pass in the DatanodeDescriptor rather than the
   // DatanodeStorageInfo.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4e62629/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index 4b97d01..e9329cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -171,9 +171,12 @@ public class TestNameNodePrunesMissingStorages {
   String datanodeUuid;
   // Find the first storage which this block is in.
   try {
+BlockInfo storedBlock =
+cluster.getNamesystem().getBlockManager().
+getStoredBlock(block.getLocalBlock());
Iterator<DatanodeStorageInfo> storageInfoIter =
 cluster.getNamesystem().getBlockManager().
-getStorages(block.getLocalBlock()).iterator();
+blocksMap.getStorages(storedBlock).iterator();
 assertTrue(storageInfoIter.hasNext());
 DatanodeStorageInfo info = storageInfoIter.next();
 storageIdToRemove = info.getStorageID();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4e62629/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index 05aec4b..7d7c81e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -269,7 +269,7 @@ public class TestAddStripedBlocks {
   StorageBlockReport[] reports = {new StorageBlockReport(storage,
   bll)};
   

[30/51] [abbrv] hadoop git commit: HDFS-7826. Erasure Coding: Update INodeFile quota computation for striped blocks. Contributed by Kai Sasaki.

2015-04-06 Thread zhz
HDFS-7826. Erasure Coding: Update INodeFile quota computation for striped 
blocks. Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/904099c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/904099c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/904099c4

Branch: refs/heads/HDFS-7285
Commit: 904099c49e217980ddc537cd668dc3fb6a436dd8
Parents: cef8ce4
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 16 16:37:08 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:20:59 2015 -0700

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  3 +
 .../blockmanagement/BlockInfoStriped.java   | 12 ++-
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 89 +---
 3 files changed, 90 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/904099c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 245b630..07b72e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -186,4 +186,7 @@ public class HdfsConstants {
   public static final byte NUM_PARITY_BLOCKS = 2;
   public static final long BLOCK_GROUP_INDEX_MASK = 15;
   public static final byte MAX_BLOCKS_IN_GROUP = 16;
+
+  // The chunk size for striped block which is used by erasure coding
+  public static final int BLOCK_STRIPED_CHUNK_SIZE = 64 * 1024;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/904099c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 84c3be6..cef8318 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
 
 /**
@@ -34,6 +35,7 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
  * array to record the block index for each triplet.
  */
 public class BlockInfoStriped extends BlockInfo {
+  private final int   chunkSize = HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE;
   private final short dataBlockNum;
   private final short parityBlockNum;
   /**
@@ -56,7 +58,7 @@ public class BlockInfoStriped extends BlockInfo {
 this.setBlockCollection(b.getBlockCollection());
   }
 
-  short getTotalBlockNum() {
+  public short getTotalBlockNum() {
 return (short) (dataBlockNum + parityBlockNum);
   }
 
@@ -178,6 +180,14 @@ public class BlockInfoStriped extends BlockInfo {
 }
   }
 
+  public long spaceConsumed() {
+// In case striped blocks, total usage by this striped blocks should
+// be the total of data blocks and parity blocks because
+// `getNumBytes` is the total of actual data block size.
+return ((getNumBytes() - 1) / (dataBlockNum * chunkSize) + 1)
+* chunkSize * parityBlockNum + getNumBytes();
+  }
+
   @Override
   public final boolean isStriped() {
 return true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/904099c4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
index 22d61bc..9cd7ddd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
@@ -42,6 +42,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import 

[40/51] [abbrv] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging (this commit is for HDFS-7742)

2015-04-06 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging (this 
commit is for HDFS-7742)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5e3f7327
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5e3f7327
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5e3f7327

Branch: refs/heads/HDFS-7285
Commit: 5e3f7327dac9747da63d1d49316b13fb722977ba
Parents: a5097a4
Author: Zhe Zhang z...@apache.org
Authored: Mon Mar 30 10:23:09 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:02 2015 -0700

--
 .../hdfs/server/blockmanagement/TestBlockManager.java   | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5e3f7327/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index cbea3d8..43f4607 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -552,11 +552,11 @@ public class TestBlockManager {
 assertNotNull("Chooses decommissioning source node for a normal replication"
 + " if all available source nodes have reached their replication"
 + " limits below the hard limit.",
-bm.chooseSourceDatanode(
-aBlock,
+bm.chooseSourceDatanodes(
+bm.getStoredBlock(aBlock),
 cntNodes,
 liveNodes,
-new NumberReplicas(),
+new NumberReplicas(), new LinkedList<Short>(), 1,
 UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
 
 
@@ -566,11 +566,11 @@ public class TestBlockManager {
 
 assertNull("Does not choose a source decommissioning node for a normal"
 + " replication when all available nodes exceed the hard limit.",
-bm.chooseSourceDatanode(
-aBlock,
+bm.chooseSourceDatanodes(
+bm.getStoredBlock(aBlock),
 cntNodes,
 liveNodes,
-new NumberReplicas(),
+new NumberReplicas(), new LinkedList<Short>(), 1,
 UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
   }
 



[29/51] [abbrv] hadoop git commit: HDFS-7912. Erasure Coding: track BlockInfo instead of Block in UnderReplicatedBlocks and PendingReplicationBlocks. Contributed by Jing Zhao.

2015-04-06 Thread zhz
HDFS-7912. Erasure Coding: track BlockInfo instead of Block in 
UnderReplicatedBlocks and PendingReplicationBlocks. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed82e683
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed82e683
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed82e683

Branch: refs/heads/HDFS-7285
Commit: ed82e6836f443b708a4af308195d821c2a3f6970
Parents: 904099c
Author: Jing Zhao ji...@apache.org
Authored: Tue Mar 17 10:18:50 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:20:59 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 47 -
 .../PendingReplicationBlocks.java   | 51 +--
 .../blockmanagement/UnderReplicatedBlocks.java  | 49 +-
 .../hdfs/server/namenode/FSDirAttrOp.java   | 10 ++--
 .../hdfs/server/namenode/FSNamesystem.java  | 21 
 .../hadoop/hdfs/server/namenode/INode.java  | 12 ++---
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  4 +-
 .../hdfs/server/namenode/NamenodeFsck.java  | 10 ++--
 .../hadoop/hdfs/server/namenode/SafeMode.java   |  3 +-
 .../blockmanagement/BlockManagerTestUtil.java   |  5 +-
 .../blockmanagement/TestBlockManager.java   |  8 +--
 .../server/blockmanagement/TestNodeCount.java   |  3 +-
 .../TestOverReplicatedBlocks.java   |  5 +-
 .../blockmanagement/TestPendingReplication.java | 19 ---
 .../TestRBWBlockInvalidation.java   |  4 +-
 .../blockmanagement/TestReplicationPolicy.java  | 53 +++-
 .../TestUnderReplicatedBlockQueues.java | 16 +++---
 .../datanode/TestReadOnlySharedStorage.java |  9 ++--
 .../namenode/TestProcessCorruptBlocks.java  |  5 +-
 19 files changed, 180 insertions(+), 154 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed82e683/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index cac13b4..5cbad40 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1336,7 +1336,7 @@ public class BlockManager {
* @return number of blocks scheduled for replication during this iteration.
*/
   int computeReplicationWork(int blocksToProcess) {
-List<List<Block>> blocksToReplicate = null;
+List<List<BlockInfo>> blocksToReplicate = null;
 namesystem.writeLock();
 try {
   // Choose the blocks to be replicated
@@ -1354,7 +1354,7 @@ public class BlockManager {
* @return the number of blocks scheduled for replication
*/
   @VisibleForTesting
-  int computeReplicationWorkForBlocks(List<List<Block>> blocksToReplicate) {
+  int computeReplicationWorkForBlocks(List<List<BlockInfo>> blocksToReplicate) {
 int requiredReplication, numEffectiveReplicas;
 List<DatanodeDescriptor> containingNodes;
 DatanodeDescriptor srcNode;
@@ -1368,7 +1368,7 @@ public class BlockManager {
 try {
   synchronized (neededReplications) {
 for (int priority = 0; priority < blocksToReplicate.size(); priority++) {
-  for (Block block : blocksToReplicate.get(priority)) {
+  for (BlockInfo block : blocksToReplicate.get(priority)) {
 // block should belong to a file
 bc = blocksMap.getBlockCollection(block);
 // abandoned block or block reopened for append
@@ -1452,7 +1452,7 @@ public class BlockManager {
 }
 
 synchronized (neededReplications) {
-  Block block = rw.block;
+  BlockInfo block = rw.block;
   int priority = rw.priority;
   // Recheck since global lock was released
   // block should belong to a file
@@ -1710,7 +1710,7 @@ public class BlockManager {
* and put them back into the neededReplication queue
*/
   private void processPendingReplications() {
-Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
+BlockInfo[] timedOutItems = pendingReplications.getTimedOutBlocks();
 if (timedOutItems != null) {
   namesystem.writeLock();
   try {
@@ -2883,13 +2883,13 @@ public class BlockManager {
   
   /** Set replication for the blocks. */
   public void setReplication(final short oldRepl, final short newRepl,
-  final String src, final Block... blocks) {
+  final String src, 

[21/51] [abbrv] hadoop git commit: HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode. Contributed by Jing Zhao.

2015-04-06 Thread zhz
HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1af3d5d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1af3d5d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1af3d5d2

Branch: refs/heads/HDFS-7285
Commit: 1af3d5d2d30a03092400b0fef6be789f3c239363
Parents: 565cef0
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 2 13:44:33 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:20:44 2015 -0700

--
 .../server/blockmanagement/BlockIdManager.java  |  31 +++-
 .../hdfs/server/blockmanagement/BlockInfo.java  |   4 +-
 .../blockmanagement/BlockInfoContiguous.java|   5 +
 .../blockmanagement/BlockInfoStriped.java   |   8 +-
 .../server/blockmanagement/BlockManager.java|  44 --
 .../hdfs/server/blockmanagement/BlocksMap.java  |  20 ++-
 .../blockmanagement/DecommissionManager.java|   9 +-
 .../hdfs/server/namenode/FSDirectory.java   |  27 +++-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  69 ++---
 .../hdfs/server/namenode/FSImageFormat.java |  12 +-
 .../server/namenode/FSImageFormatPBINode.java   |   5 +-
 .../server/namenode/FSImageFormatProtobuf.java  |   9 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  27 ++--
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  25 +++-
 .../server/namenode/NameNodeLayoutVersion.java  |   3 +-
 .../hadoop-hdfs/src/main/proto/fsimage.proto|   1 +
 .../hdfs/server/namenode/TestAddBlockgroup.java |  85 ---
 .../server/namenode/TestAddStripedBlocks.java   | 146 +++
 18 files changed, 351 insertions(+), 179 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1af3d5d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 3ae54ce..1d69d74 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -103,21 +103,38 @@ public class BlockIdManager {
   }
 
   /**
-   * Sets the maximum allocated block ID for this filesystem. This is
+   * Sets the maximum allocated contiguous block ID for this filesystem. This is
* the basis for allocating new block IDs.
*/
-  public void setLastAllocatedBlockId(long blockId) {
+  public void setLastAllocatedContiguousBlockId(long blockId) {
 blockIdGenerator.skipTo(blockId);
   }
 
   /**
-   * Gets the maximum sequentially allocated block ID for this filesystem
+   * Gets the maximum sequentially allocated contiguous block ID for this
+   * filesystem
*/
-  public long getLastAllocatedBlockId() {
+  public long getLastAllocatedContiguousBlockId() {
 return blockIdGenerator.getCurrentValue();
   }
 
   /**
+   * Sets the maximum allocated striped block ID for this filesystem. This is
+   * the basis for allocating new block IDs.
+   */
+  public void setLastAllocatedStripedBlockId(long blockId) {
+blockGroupIdGenerator.skipTo(blockId);
+  }
+
+  /**
+   * Gets the maximum sequentially allocated striped block ID for this
+   * filesystem
+   */
+  public long getLastAllocatedStripedBlockId() {
+return blockGroupIdGenerator.getCurrentValue();
+  }
+
+  /**
* Sets the current generation stamp for legacy blocks
*/
   public void setGenerationStampV1(long stamp) {
@@ -188,11 +205,11 @@ public class BlockIdManager {
   /**
* Increments, logs and then returns the block ID
*/
-  public long nextBlockId() {
+  public long nextContiguousBlockId() {
 return blockIdGenerator.nextValue();
   }
 
-  public long nextBlockGroupId() {
+  public long nextStripedBlockId() {
 return blockGroupIdGenerator.nextValue();
   }
 
@@ -216,7 +233,7 @@ public class BlockIdManager {
return id < 0;
   }
 
-  public static long convertToGroupID(long id) {
+  public static long convertToStripedID(long id) {
return id & (~HdfsConstants.BLOCK_GROUP_INDEX_MASK);
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1af3d5d2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfo.java
 

[25/51] [abbrv] hadoop git commit: HDFS-7853. Erasure coding: extend LocatedBlocks to support reading from striped files. Contributed by Jing Zhao.

2015-04-06 Thread zhz
HDFS-7853. Erasure coding: extend LocatedBlocks to support reading from striped 
files. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2e853a67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2e853a67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2e853a67

Branch: refs/heads/HDFS-7285
Commit: 2e853a6700dc81b68984d2af9966564e409f14af
Parents: c8d4c25
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 9 14:59:58 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:20:58 2015 -0700

--
 .../hadoop/hdfs/protocol/LocatedBlock.java  |   5 +-
 .../hdfs/protocol/LocatedStripedBlock.java  |  68 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  14 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  13 +-
 .../DatanodeProtocolClientSideTranslatorPB.java |   2 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  80 +++
 .../blockmanagement/BlockInfoStriped.java   |   5 +
 .../BlockInfoStripedUnderConstruction.java  |  99 +++--
 .../server/blockmanagement/BlockManager.java|  51 ---
 .../blockmanagement/DatanodeDescriptor.java |   4 +-
 .../blockmanagement/DatanodeStorageInfo.java|   3 +-
 .../server/namenode/FSImageFormatPBINode.java   |  21 +--
 .../hdfs/server/namenode/FSNamesystem.java  |  34 +++--
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |   1 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  12 ++
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  16 +--
 .../datanode/TestIncrementalBrVariations.java   |  14 +-
 .../server/namenode/TestAddStripedBlocks.java   | 141 +++
 .../hdfs/server/namenode/TestFSImage.java   |   5 +-
 20 files changed, 444 insertions(+), 146 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e853a67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index e729869..a38e8f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -25,7 +25,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfoWithStorage;
 import org.apache.hadoop.security.token.Token;
 
 import com.google.common.collect.Lists;
@@ -51,14 +50,14 @@ public class LocatedBlock {
   // else false. If block has few corrupt replicas, they are filtered and 
   // their locations are not part of this object
   private boolean corrupt;
-  private Token<BlockTokenIdentifier> blockToken = new Token<BlockTokenIdentifier>();
+  private Token<BlockTokenIdentifier> blockToken = new Token<>();
   /**
* List of cached datanode locations
*/
   private DatanodeInfo[] cachedLocs;
 
   // Used when there are no locations
-  private static final DatanodeInfoWithStorage[] EMPTY_LOCS =
+  static final DatanodeInfoWithStorage[] EMPTY_LOCS =
   new DatanodeInfoWithStorage[0];
 
   public LocatedBlock(ExtendedBlock b, DatanodeInfo[] locs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2e853a67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
new file mode 100644
index 000..97e3a69
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by 

[08/51] [abbrv] hadoop git commit: HDFS-7347. Configurable erasure coding policy for individual files and directories ( Contributed by Zhe Zhang )

2015-04-06 Thread zhz
HDFS-7347. Configurable erasure coding policy for individual files and 
directories ( Contributed by Zhe Zhang )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d552fda8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d552fda8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d552fda8

Branch: refs/heads/HDFS-7285
Commit: d552fda896fd19880f902c08814e8d898f9b3109
Parents: 28bebc8
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Nov 6 10:03:26 2014 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 09:31:54 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  4 ++
 .../hadoop/hdfs/protocol/HdfsConstants.java |  2 +
 .../BlockStoragePolicySuite.java|  5 ++
 .../hadoop/hdfs/TestBlockStoragePolicy.java | 12 +++-
 .../TestBlockInitialEncoding.java   | 75 
 5 files changed, 95 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d552fda8/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
new file mode 100644
index 000..2ef8527
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -0,0 +1,4 @@
+  BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
+
+HDFS-7347. Configurable erasure coding policy for individual files and
+directories ( Zhe Zhang via vinayakumarb )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d552fda8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 7cf8a47..54c650b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -171,6 +171,7 @@ public class HdfsConstants {
   public static final String ONESSD_STORAGE_POLICY_NAME = "ONE_SSD";
   public static final String HOT_STORAGE_POLICY_NAME = "HOT";
   public static final String WARM_STORAGE_POLICY_NAME = "WARM";
+  public static final String EC_STORAGE_POLICY_NAME = "EC";
   public static final String COLD_STORAGE_POLICY_NAME = "COLD";
 
   public static final byte MEMORY_STORAGE_POLICY_ID = 15;
@@ -178,5 +179,6 @@ public class HdfsConstants {
   public static final byte ONESSD_STORAGE_POLICY_ID = 10;
   public static final byte HOT_STORAGE_POLICY_ID = 7;
   public static final byte WARM_STORAGE_POLICY_ID = 5;
+  public static final byte EC_STORAGE_POLICY_ID = 4;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d552fda8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
index 020cb5f..3d121cc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockStoragePolicySuite.java
@@ -78,6 +78,11 @@ public class BlockStoragePolicySuite {
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE},
 new StorageType[]{StorageType.DISK, StorageType.ARCHIVE});
+final byte ecId = HdfsConstants.EC_STORAGE_POLICY_ID;
+policies[ecId] = new BlockStoragePolicy(ecId,
+HdfsConstants.EC_STORAGE_POLICY_NAME,
+new StorageType[]{StorageType.DISK}, StorageType.EMPTY_ARRAY,
+new StorageType[]{StorageType.ARCHIVE});
 final byte coldId = HdfsConstants.COLD_STORAGE_POLICY_ID;
 policies[coldId] = new BlockStoragePolicy(coldId,
 HdfsConstants.COLD_STORAGE_POLICY_NAME,

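With the EC policy wired into the suite above, a client opts a directory into
erasure coding with an ordinary setStoragePolicy call. A hedged sketch (the
path and the DistributedFileSystem handle are illustrative, not part of this
commit):

  DistributedFileSystem fs = cluster.getFileSystem();
  fs.mkdirs(new Path("/ec"));
  fs.setStoragePolicy(new Path("/ec"), HdfsConstants.EC_STORAGE_POLICY_NAME);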
http://git-wip-us.apache.org/repos/asf/hadoop/blob/d552fda8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
--
diff --git 

[49/51] [abbrv] hadoop git commit: HDFS-7617. Add unit tests for editlog transactions for EC. Contributed by Hui Zheng.

2015-04-06 Thread zhz
HDFS-7617. Add unit tests for editlog transactions for EC. Contributed by Hui 
Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/86f579e1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/86f579e1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/86f579e1

Branch: refs/heads/HDFS-7285
Commit: 86f579e1f03d859b1fd3ed4508125a920bdda006
Parents: 18f3585
Author: Zhe Zhang z...@apache.org
Authored: Tue Mar 31 10:46:04 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:04 2015 -0700

--
 .../server/namenode/TestFSEditLogLoader.java| 157 +++
 1 file changed, 157 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/86f579e1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 833ef95..d3cb749 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -39,14 +39,18 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import 
org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -414,4 +418,157 @@ public class TestFSEditLogLoader {
   fromByte(code), FSEditLogOpCodes.fromByte(code));
 }
   }
+
+  @Test
+  public void testAddNewStripedBlock() throws IOException{
+// start a cluster
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  FSNamesystem fns = cluster.getNamesystem();
+
+  String testDir = "/ec";
+  String testFile = "testfile_001";
+  String testFilePath = testDir + "/" + testFile;
+  String clientName = "testUser1";
+  String clientMachine = "testMachine1";
+  long blkId = 1;
+  long blkNumBytes = 1024;
+  long timestamp = 1426222918;
+  short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
+  short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
+
+  //set the storage policy of the directory
+  fs.mkdir(new Path(testDir), new FsPermission("755"));
+  fs.setStoragePolicy(new Path(testDir),
+  HdfsConstants.EC_STORAGE_POLICY_NAME);
+
+  // Create a file with striped block
+  Path p = new Path(testFilePath);
+  DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
+
+  fns.enterSafeMode(false);
+  fns.saveNamespace(0, 0);
+  fns.leaveSafeMode();
+
+  // Add a striped block to the file
+  BlockInfoStriped stripedBlk = new BlockInfoStriped(
+  new Block(blkId, blkNumBytes, timestamp), blockNum, parityNum);
+  INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
+  file.toUnderConstruction(clientName, clientMachine);
+  file.getStripedBlocksFeature().addBlock(stripedBlk);
+  fns.getEditLog().logAddBlock(testFilePath, file);
+  file.toCompleteFile(System.currentTimeMillis());
+
+  // If the block loaded after restart is the same as the one added above,
+  // the edit log was applied to the fsimage successfully.
+  cluster.restartNameNodes();
+  cluster.waitActive();
+  fns = cluster.getNamesystem();
+
+  INodeFile inodeLoaded = (INodeFile)fns.getFSDirectory()
+  .getINode(testFilePath);
+
+  assertTrue(inodeLoaded.isWithStripedBlocks());
+
+  BlockInfoStriped[] blks = (BlockInfoStriped[])inodeLoaded.getBlocks();
+  assertEquals(1, 

[11/51] [abbrv] hadoop git commit: HDFS-7652. Process block reports for erasure coded blocks. Contributed by Zhe Zhang

2015-04-06 Thread zhz
HDFS-7652. Process block reports for erasure coded blocks. Contributed by Zhe 
Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e39e00c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e39e00c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e39e00c

Branch: refs/heads/HDFS-7285
Commit: 7e39e00c1012b877a7b8b29adf8e5a7250f542b5
Parents: 2b2859e
Author: Zhe Zhang z...@apache.org
Authored: Mon Feb 9 10:27:14 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:01:04 2015 -0700

--
 .../server/blockmanagement/BlockIdManager.java|  8 
 .../hdfs/server/blockmanagement/BlockManager.java | 18 +-
 2 files changed, 21 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e39e00c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index c8b9d20..e7f8a05 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -211,4 +211,12 @@ public class BlockIdManager {
   .LAST_RESERVED_BLOCK_ID);
 generationStampV1Limit = GenerationStamp.GRANDFATHER_GENERATION_STAMP;
   }
+
+  public static boolean isStripedBlockID(long id) {
+return id < 0;
+  }
+
+  public static long convertToGroupID(long id) {
+return id & (~(HdfsConstants.MAX_BLOCKS_IN_GROUP - 1));
+  }
 }

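To make the new ID helpers concrete: striped block IDs are allocated in the
negative range, and with MAX_BLOCKS_IN_GROUP == 16 the low 4 bits of an ID
hold the index of a block within its group, so masking them off recovers the
group ID. A small illustration (the IDs are made up):

  long mask = ~(16L - 1);               // clears the 4 low-order index bits
  long memberId = -13L;                 // a member of block group -16
  System.out.println(memberId & mask);  // prints -16, the group's ID
  System.out.println(memberId < 0);     // true => treated as a striped ID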
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e39e00c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 524afa0..97dd2c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1925,7 +1925,7 @@ public class BlockManager {
   break;
 }
 
-BlockInfoContiguous bi = blocksMap.getStoredBlock(b);
+BlockInfoContiguous bi = getStoredBlock(b);
 if (bi == null) {
   if (LOG.isDebugEnabled()) {
 LOG.debug("BLOCK* rescanPostponedMisreplicatedBlocks: " +
@@ -2068,7 +2068,7 @@ public class BlockManager {
 continue;
   }
   
-  BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(iblk);
+  BlockInfoContiguous storedBlock = getStoredBlock(iblk);
   // If block does not belong to any file, we are done.
   if (storedBlock == null) continue;
   
@@ -2208,7 +2208,7 @@ public class BlockManager {
 }
 
 // find block by blockId
-BlockInfoContiguous storedBlock = blocksMap.getStoredBlock(block);
+BlockInfoContiguous storedBlock = getStoredBlock(block);
 if(storedBlock == null) {
   // If blocksMap does not contain reported block id,
   // the replica should be removed from the data-node.
@@ -2499,7 +2499,7 @@ public class BlockManager {
 DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
 if (block instanceof BlockInfoContiguousUnderConstruction) {
   //refresh our copy in case the block got completed in another thread
-  storedBlock = blocksMap.getStoredBlock(block);
+  storedBlock = getStoredBlock(block);
 } else {
   storedBlock = block;
 }
@@ -3361,7 +3361,15 @@ public class BlockManager {
   }
 
   public BlockInfoContiguous getStoredBlock(Block block) {
-return blocksMap.getStoredBlock(block);
+BlockInfoContiguous info = null;
+if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  info = blocksMap.getStoredBlock(
+  new Block(BlockIdManager.convertToGroupID(block.getBlockId())));
+}
+if (info == null) {
+  info = blocksMap.getStoredBlock(block);
+}
+return info;
   }
 
   /** updates a block in under replication queue */

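In effect, a replica reported by a DataNode may carry a striped member ID
while the blocks map is keyed by the group ID, so the lookup probes the
masked ID first and only then the raw ID. A hedged illustration (the block ID
and the blockManager handle are made up):

  Block reported = new Block(-13L);     // negative => striped ID space
  // Probes blocksMap with the group ID -16 first, then falls back to -13.
  BlockInfoContiguous stored = blockManager.getStoredBlock(reported);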


[33/51] [abbrv] hadoop git commit: HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai Zheng

2015-04-06 Thread zhz
HADOOP-11707. Add factory to create raw erasure coder.  Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6d2f21b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6d2f21b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6d2f21b

Branch: refs/heads/HDFS-7285
Commit: c6d2f21b97851b7b658ac8ba6dbebc726cff88dc
Parents: 527ddbd
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Mar 20 15:07:00 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:00 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +-
 .../rawcoder/JRSRawErasureCoderFactory.java | 34 ++
 .../rawcoder/RawErasureCoderFactory.java| 38 
 .../rawcoder/XorRawErasureCoderFactory.java | 34 ++
 4 files changed, 108 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6d2f21b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index e27ff5c..f566f0e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -24,4 +24,5 @@
 HADOOP-11706. Refine a little bit erasure coder API. Contributed by Kai 
Zheng
 ( Kai Zheng )
 
-
+HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai 
Zheng
+( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6d2f21b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
new file mode 100644
index 000..d6b40aa
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+/**
+ * A raw coder factory for raw Reed-Solomon coder in Java.
+ */
+public class JRSRawErasureCoderFactory implements RawErasureCoderFactory {
+
+  @Override
+  public RawErasureEncoder createEncoder() {
+return new JRSRawEncoder();
+  }
+
+  @Override
+  public RawErasureDecoder createDecoder() {
+return new JRSRawDecoder();
+  }
+}

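A minimal usage sketch of the factory API introduced here (the initialize()
signature follows TestErasureCoderBase later in this series; the 6 + 3 schema
and 16 KB chunk size are illustrative assumptions):

  RawErasureCoderFactory factory = new JRSRawErasureCoderFactory();
  RawErasureEncoder encoder = factory.createEncoder();
  encoder.initialize(6, 3, 16 * 1024);  // data units, parity units, chunk size
  RawErasureDecoder decoder = factory.createDecoder();
  decoder.initialize(6, 3, 16 * 1024);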
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6d2f21b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
new file mode 100644
index 000..95a1cfe
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in 

[36/51] [abbrv] hadoop git commit: HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng

2015-04-06 Thread zhz
HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a129ddaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a129ddaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a129ddaf

Branch: refs/heads/HDFS-7285
Commit: a129ddaffc26c3f3ba3a212b405ad7b6add16d10
Parents: c6d2f21
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Mar 20 19:15:52 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:01 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +
 .../hadoop/fs/CommonConfigurationKeys.java  | 15 
 .../erasurecode/coder/AbstractErasureCoder.java | 65 ++
 .../coder/AbstractErasureDecoder.java   |  6 +-
 .../coder/AbstractErasureEncoder.java   |  6 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  | 83 ++
 .../io/erasurecode/coder/RSErasureEncoder.java  | 47 ++
 .../io/erasurecode/coder/XorErasureDecoder.java |  2 +-
 .../io/erasurecode/coder/XorErasureEncoder.java |  2 +-
 .../erasurecode/coder/TestRSErasureCoder.java   | 92 
 10 files changed, 315 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129ddaf/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index f566f0e..b69e69a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -26,3 +26,6 @@
 
 HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai 
Zheng
 ( Kai Zheng )
+
+HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng
+( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129ddaf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 7575496..70fea01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -135,6 +135,21 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   false;
 
   /**
+   * Erasure Coding configuration family
+   */
+
+  /** Supported erasure codec classes */
+  public static final String IO_ERASURECODE_CODECS_KEY = "io.erasurecode.codecs";
+
+  /** Use XOR raw coder when possible for the RS codec */
+  public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
+  "io.erasurecode.codec.rs.usexor";
+
+  /** Raw coder factory for the RS codec */
+  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
+  "io.erasurecode.codec.rs.rawcoder";
+
+  /**
* Service Authorization
*/
   public static final String 

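A hedged sketch of how these keys are meant to be used: plugging a raw coder
factory into the RS codec through the Configuration (the factory class comes
from HADOOP-11707 above; disabling the XOR fallback here is illustrative):

  Configuration conf = new Configuration();
  conf.set(CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY,
      JRSRawErasureCoderFactory.class.getName());
  conf.setBoolean(CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_USEXOR_KEY,
      false);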
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a129ddaf/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
index 8d3bc34..0e4de89 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.io.erasurecode.coder;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 
 /**
  * A common class of basic facilities to be shared by encoder and decoder
@@ -31,6 +36,66 @@ public abstract class AbstractErasureCoder
   private int numParityUnits;
   private int chunkSize;
 
+  /**
+   * Create raw decoder using the factory specified by 

[43/51] [abbrv] hadoop git commit: HDFS-8005. Erasure Coding: simplify striped block recovery work computation and add tests. Contributed by Jing Zhao.

2015-04-06 Thread zhz
HDFS-8005. Erasure Coding: simplify striped block recovery work computation and 
add tests. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62d49a84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62d49a84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62d49a84

Branch: refs/heads/HDFS-7285
Commit: 62d49a84d13a3ff7f63281a318a1940acee64743
Parents: 0767bd7
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 30 13:35:36 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:03 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 138 +---
 .../blockmanagement/DatanodeDescriptor.java |  14 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../blockmanagement/TestBlockManager.java   |  33 +--
 .../TestRecoverStripedBlocks.java   | 107 --
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../namenode/TestRecoverStripedBlocks.java  | 210 +++
 7 files changed, 292 insertions(+), 213 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d49a84/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e05330c..f42adcd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -538,7 +538,7 @@ public class BlockManager {
 // source node returned is not used
 chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-new LinkedList<Short>(), 1, UnderReplicatedBlocks.LEVEL);
+new LinkedList<Short>(), UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1376,7 +1376,7 @@ public class BlockManager {
  int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
 int requiredReplication, numEffectiveReplicas;
 List<DatanodeDescriptor> containingNodes;
-BlockCollection bc = null;
+BlockCollection bc;
 int additionalReplRequired;
 
 int scheduledWork = 0;
@@ -1404,13 +1404,10 @@ public class BlockManager {
 containingNodes = new ArrayList<>();
 List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
 NumberReplicas numReplicas = new NumberReplicas();
-List<Short> missingBlockIndices = new LinkedList<>();
-DatanodeDescriptor[] srcNodes;
-int numSourceNodes = bc.isStriped() ?
-HdfsConstants.NUM_DATA_BLOCKS : 1;
-srcNodes = chooseSourceDatanodes(
-block, containingNodes, liveReplicaNodes, numReplicas,
-missingBlockIndices, numSourceNodes, priority);
+List<Short> liveBlockIndices = new ArrayList<>();
+final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
+containingNodes, liveReplicaNodes, numReplicas,
+liveBlockIndices, priority);
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be replicated from any node
   LOG.debug(Block  + block +  cannot be recovered  +
@@ -1442,15 +1439,14 @@ public class BlockManager {
 } else {
   additionalReplRequired = 1; // Needed on a new rack
 }
-if (bc.isStriped()) {
+if (block.isStriped()) {
+  short[] indices = new short[liveBlockIndices.size()];
+  for (int i = 0 ; i < liveBlockIndices.size(); i++) {
+indices[i] = liveBlockIndices.get(i);
+  }
   ErasureCodingWork ecw = new ErasureCodingWork(block, bc, 
srcNodes,
   containingNodes, liveReplicaNodes, additionalReplRequired,
-  priority);
-  short[] missingBlockArray = new short[missingBlockIndices.size()];
-  for (int i = 0 ; i < missingBlockIndices.size(); i++) {
-missingBlockArray[i] = missingBlockIndices.get(i);
-  }
-  ecw.setMissingBlockIndices(missingBlockArray);
+  priority, indices);
   recovWork.add(ecw);
 } else {
   recovWork.add(new ReplicationWork(block, bc, 

[24/51] [abbrv] hadoop git commit: HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng

2015-04-06 Thread zhz
HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db8a83ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db8a83ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db8a83ac

Branch: refs/heads/HDFS-7285
Commit: db8a83ac0027804d069aecb89f7460c1ff8740b4
Parents: 2e853a6
Author: drankye kai.zh...@intel.com
Authored: Thu Mar 12 23:35:22 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:20:58 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  4 +++
 .../erasurecode/coder/AbstractErasureCoder.java |  5 ++-
 .../rawcoder/AbstractRawErasureCoder.java   |  5 ++-
 .../hadoop/io/erasurecode/TestCoderBase.java|  6 
 .../erasurecode/coder/TestErasureCoderBase.java | 36 +---
 .../erasurecode/rawcoder/TestRawCoderBase.java  | 13 +--
 6 files changed, 60 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db8a83ac/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c17a1bd..a97dc34 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -18,3 +18,7 @@
 HADOOP-11646. Erasure Coder API for encoding and decoding of block group
 ( Kai Zheng via vinayakumarb )
 
+HADOOP-11705. Make erasure coder configurable. Contributed by Kai Zheng
+( Kai Zheng )
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db8a83ac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
index f2cc041..8d3bc34 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.io.erasurecode.coder;
 
+import org.apache.hadoop.conf.Configured;
+
 /**
  * A common class of basic facilities to be shared by encoder and decoder
  *
  * It implements the {@link ErasureCoder} interface.
  */
-public abstract class AbstractErasureCoder implements ErasureCoder {
+public abstract class AbstractErasureCoder
+extends Configured implements ErasureCoder {
 
   private int numDataUnits;
   private int numParityUnits;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db8a83ac/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index 74d2ab6..e6f3d92 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -17,12 +17,15 @@
  */
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
+import org.apache.hadoop.conf.Configured;
+
 /**
  * A common class of basic facilities to be shared by encoder and decoder
  *
  * It implements the {@link RawErasureCoder} interface.
  */
-public abstract class AbstractRawErasureCoder implements RawErasureCoder {
+public abstract class AbstractRawErasureCoder
+extends Configured implements RawErasureCoder {
 
   private int numDataUnits;
   private int numParityUnits;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db8a83ac/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
index 3c4288c..194413a 100644
--- 

[28/51] [abbrv] hadoop git commit: Fixed a compiling issue introduced by HADOOP-11705.

2015-04-06 Thread zhz
Fixed a compiling issue introduced by HADOOP-11705.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aac871d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aac871d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aac871d3

Branch: refs/heads/HDFS-7285
Commit: aac871d3d5f3e3732573946745fbc7d576e79db0
Parents: db8a83a
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Mar 13 00:13:06 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:20:59 2015 -0700

--
 .../apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aac871d3/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
index 36e061a..d911db9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/coder/TestErasureCoderBase.java
@@ -162,7 +162,7 @@ public abstract class TestErasureCoderBase extends 
TestCoderBase {
 }
 
 encoder.initialize(numDataUnits, numParityUnits, chunkSize);
-encoder.setConf(conf);
+((AbstractErasureCoder)encoder).setConf(conf);
 return encoder;
   }
 
@@ -179,7 +179,7 @@ public abstract class TestErasureCoderBase extends 
TestCoderBase {
 }
 
 decoder.initialize(numDataUnits, numParityUnits, chunkSize);
-decoder.setConf(conf);
+((AbstractErasureCoder)decoder).setConf(conf);
 return decoder;
   }
 



[32/51] [abbrv] hadoop git commit: HDFS-7369. Erasure coding: distribute recovery work for striped blocks to DataNode. Contributed by Zhe Zhang.

2015-04-06 Thread zhz
HDFS-7369. Erasure coding: distribute recovery work for striped blocks to 
DataNode. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/527ddbd3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/527ddbd3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/527ddbd3

Branch: refs/heads/HDFS-7285
Commit: 527ddbd3843ae4fb85eedba3ccbb6df48cc6bee0
Parents: 08a4c9e
Author: Zhe Zhang z...@apache.org
Authored: Wed Mar 18 15:52:36 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:00 2015 -0700

--
 .../server/blockmanagement/BlockCollection.java |   5 +
 .../server/blockmanagement/BlockManager.java| 294 +--
 .../blockmanagement/DatanodeDescriptor.java |  72 -
 .../server/blockmanagement/DatanodeManager.java |  20 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   9 +-
 .../server/protocol/BlockECRecoveryCommand.java |  63 
 .../hdfs/server/protocol/DatanodeProtocol.java  |   1 +
 .../blockmanagement/BlockManagerTestUtil.java   |   2 +-
 .../blockmanagement/TestBlockManager.java   |  22 +-
 .../TestRecoverStripedBlocks.java   | 107 +++
 10 files changed, 486 insertions(+), 109 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/527ddbd3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 440a081..50dd17b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -86,4 +86,9 @@ public interface BlockCollection {
* @return whether the block collection is under construction.
*/
   public boolean isUnderConstruction();
+
+  /**
+   * @return whether the block collection is in striping format
+   */
+  public boolean isStriped();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/527ddbd3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5cbad40..291347b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -531,9 +532,9 @@ public class BlockManager {
 
 NumberReplicas numReplicas = new NumberReplicas();
 // source node returned is not used
-chooseSourceDatanode(block, containingNodes,
+chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-UnderReplicatedBlocks.LEVEL);
+new LinkedList<Short>(), 1, UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1327,15 +1328,15 @@ public class BlockManager {
   }
 
   /**
-   * Scan blocks in {@link #neededReplications} and assign replication
-   * work to data-nodes they belong to.
+   * Scan blocks in {@link #neededReplications} and assign recovery
+   * (replication or erasure coding) work to data-nodes they belong to.
*
* The number of process blocks equals either twice the number of live
* data-nodes or the number of under-replicated blocks whichever is less.
*
* @return number of blocks scheduled for replication during this iteration.
*/
-  int computeReplicationWork(int blocksToProcess) {
+  int computeBlockRecoveryWork(int blocksToProcess) {
 List<List<BlockInfo>> blocksToReplicate = null;
 namesystem.writeLock();
 try {
@@ -1345,30 +1346,32 @@ public class 

[51/51] [abbrv] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk changes (this commit is for HDFS-8035). Contributed by Zhe Zhang

2015-04-06 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk 
changes (this commit is for HDFS-8035). Contributed by Zhe Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6ecbaaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6ecbaaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6ecbaaf

Branch: refs/heads/HDFS-7285
Commit: e6ecbaafd940824bd0c9d0f20b524d70f7455c1c
Parents: bdb67d9
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 6 10:37:23 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:37:23 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java | 11 +--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java | 10 +-
 2 files changed, 10 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6ecbaaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bce789a..d30e623 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3529,13 +3529,12 @@ public class BlockManager {
   String src, BlockInfo[] blocks) {
 for (BlockInfo b: blocks) {
   if (!b.isComplete()) {
-final BlockInfoContiguousUnderConstruction uc =
-(BlockInfoContiguousUnderConstruction)b;
 final int numNodes = b.numNodes();
-        LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = "
-            + uc.getBlockUCState() + ", replication# = " + numNodes
-            + (numNodes < minReplication ? " < " : " >= ")
-            + " minimum = " + minReplication + ") in file " + src);
+        final int min = getMinStorageNum(b);
+        final BlockUCState state = b.getBlockUCState();
+        LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = " + state
+            + ", replication# = " + numNodes + (numNodes < min ? " < " : " >= ")
+            + " minimum = " + min + ") in file " + src);
 return false;
   }
 }
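
For context on the hunk above: the completeness check now derives its threshold
from getMinStorageNum(b) instead of the fixed minReplication, since a striped
block group needs at least its full set of data blocks stored while a contiguous
block only needs the configured replication minimum. A toy illustration, with
assumed constants rather than real HDFS configuration:

// Toy version of the min-storage rule; the constants are assumptions for the
// sketch, not HDFS configuration values.
class MinStorageCheck {
  static final int MIN_REPLICATION = 1;  // contiguous: dfs.namenode.replication.min
  static final int NUM_DATA_BLOCKS = 6;  // striped: all data blocks must be stored

  static int getMinStorageNum(boolean striped) {
    return striped ? NUM_DATA_BLOCKS : MIN_REPLICATION;
  }

  static boolean hasEnoughStorages(boolean striped, int numNodes) {
    return numNodes >= getMinStorageNum(striped);
  }
}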

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6ecbaaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6fb258a..009e5a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3100,7 +3100,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 
 // Part II.
 // Allocate a new block, add it to the INode and the BlocksMap. 
-BlockInfo newBlockInfo = null;
+Block newBlock = null;
 long offset;
 checkOperation(OperationCategory.WRITE);
 waitForLoadingFSImage();
@@ -3133,8 +3133,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 ExtendedBlock.getLocalBlock(previous));
 
   // allocate new block, record block locations in INode.
-  Block newBlock = createNewBlock(isStriped);
-  newBlockInfo = saveAllocatedBlock(src, fileState.iip, newBlock, targets,
+  newBlock = createNewBlock(isStriped);
+  saveAllocatedBlock(src, fileState.iip, newBlock, targets,
   isStriped);
 
   persistNewBlock(src, pendingFile);
@@ -3145,7 +3145,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 getEditLog().logSync();
 
 // Return located block
-return makeLocatedBlock(newBlockInfo, targets, offset);
+return makeLocatedBlock(getStoredBlock(newBlock), targets, offset);
   }
 
   /*
@@ -3602,7 +3602,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 BlockInfo b = v.getPenultimateBlock();
 return b == null ||
 blockManager.checkBlocksProperlyReplicated(
-src, new BlockInfoContiguous[] { b });
+src, new BlockInfo[] { b });
   }
 } finally {
   readUnlock();



[47/51] [abbrv] hadoop git commit: HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin

2015-04-06 Thread zhz
HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0800245d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0800245d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0800245d

Branch: refs/heads/HDFS-7285
Commit: 0800245d16f71e919a944b0548622353a75f1a87
Parents: 86f579e
Author: Kai Zheng kai.zh...@intel.com
Authored: Thu Apr 2 05:12:35 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:04 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 .../src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0800245d/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index b69e69a..01280db 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -29,3 +29,6 @@
 
 HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng
 ( Kai Zheng )
+
+HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
+( Xinwei Qin via Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0800245d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 8dc3f45..27be00e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -64,7 +64,7 @@ public class ECSchema {
   }
 } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Option value " +
-          options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+          options.get(NUM_DATA_UNITS_KEY) + " for " + NUM_DATA_UNITS_KEY +
          " is found. It should be an integer");
 }
 
@@ -74,7 +74,7 @@ public class ECSchema {
   }
 } catch (NumberFormatException e) {
      throw new IllegalArgumentException("Option value " +
-          options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+          options.get(NUM_PARITY_UNITS_KEY) + " for " + NUM_PARITY_UNITS_KEY +
          " is found. It should be an integer");
 }
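
The bug fixed here is a copy-paste hazard: the exception text named
CHUNK_SIZE_KEY even when k or m failed to parse. A hedged sketch of a helper
that derives the message from the same key it parses, so the two cannot drift
apart (illustrative only; ECSchema keeps this parsing inline):

import java.util.Map;

final class OptionParser {
  static int parseIntOption(Map<String, String> options, String key) {
    String value = options.get(key);
    try {
      return Integer.parseInt(value);
    } catch (NumberFormatException e) {
      // The message is built from the same key that was parsed.
      throw new IllegalArgumentException("Option value " + value + " for "
          + key + " is found. It should be an integer");
    }
  }
}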
 



[44/51] [abbrv] hadoop git commit: HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits (Vinayakumar B)

2015-04-06 Thread zhz
HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits 
(Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18f35854
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18f35854
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18f35854

Branch: refs/heads/HDFS-7285
Commit: 18f35854a5196c8c1676a9b4eb5cd268e65c4fb4
Parents: 62d49a8
Author: Vinayakumar B vinayakuma...@intel.com
Authored: Tue Mar 31 15:12:09 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:03 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt| 40 +++-
 1 file changed, 39 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18f35854/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 21e4c03..a686315 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -3,6 +3,44 @@
 HDFS-7347. Configurable erasure coding policy for individual files and
 directories ( Zhe Zhang via vinayakumarb )
 
-HDFS-7716. Add a test for BlockGroup support in FSImage.
+HDFS-7339. Representing striped block groups in NameNode with hierarchical
+naming protocol ( Zhe Zhang )
+
+HDFS-7652. Process block reports for erasure coded blocks (Zhe Zhang)
+
+HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info (Jing Zhao)
+
+HDFS-7749. Erasure Coding: Add striped block support in INodeFile (Jing 
Zhao)
+
+HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode
+(Jing Zhao via Zhe Zhang)
+
+HDFS-7872. Erasure Coding: INodeFile.dumpTreeRecursively() supports to 
print
+striped blocks (Takuya Fukudome via jing9)
+
+HDFS-7853. Erasure coding: extend LocatedBlocks to support reading from
+striped files (Jing Zhao)
+
+HDFS-7826. Erasure Coding: Update INodeFile quota computation for striped
+blocks ( Kai Sasaki via jing9 )
+
+HDFS-7912. Erasure Coding: track BlockInfo instead of Block in
+UnderReplicatedBlocks and PendingReplicationBlocks (Jing Zhao)
+
+HDFS-7369. Erasure coding: distribute recovery work for striped blocks to
+DataNode (Zhe Zhang)
+
+HDFS-7864. Erasure Coding: Update safemode calculation for striped blocks
+(GAO Rui via jing9)
+
+HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage
+( Hui Zheng via jing9 )
+
+HDFS-7616. Add a test for BlockGroup support in FSImage.
 (Takuya Fukudome via szetszwo)
 
+HDFS-7907. Erasure Coding: track invalid, corrupt, and under-recovery 
striped
+blocks in NameNode (Jing Zhao)
+
+HDFS-8005. Erasure Coding: simplify striped block recovery work computation
+and add tests (Jing Zhao)
\ No newline at end of file



[02/51] [abbrv] hadoop git commit: HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. Contributed by Li Lu.

2015-04-06 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b3948ea/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.6.0.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.6.0.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.6.0.xml
new file mode 100644
index 000..5514700
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.6.0.xml
@@ -0,0 +1,19520 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Mon Mar 30 15:30:43 PDT 2015 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-hdfs 2.6.0"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet 
org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet 
-docletpath 
/Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar
 -verbose -classpath 
/Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/target/classes:/Users/llu/hadoop-common/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.6.0.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_67.jdk/Contents/Home/lib/tools.jar:/Users/llu/hadoop-common/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.6.0.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-api/1.7.5/slf4j-api-1.7.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-ker
 
beros-codec-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/llu/.m2/repository/org/apache/curator/curator-framework/2.6.0/curator-framework-2.6.0.jar:/Users/llu/hadoop-common/hadoop-common-project/hadoop-common/target/hadoop-common-2.6.0.jar:/Users/llu/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/llu/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/llu/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/llu/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/llu/.m2/repository/com/sun/jerse
 
y/jersey-json/1.9/jersey-json-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/llu/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/llu/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/llu/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/llu/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/llu/.m2/repository/tomcat/jasper-compiler/5.5.23/jasper-compiler-5.5.23.jar:/Users/llu/.m2/repository/commons-el/commons-el/1.0/commons-el-1.0.jar:/Users/llu/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/llu/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/llu/.m2/repository/commons-configuration/commons-configuratio
 
n/1.6/commons-configuration-1.6.jar:/Users/llu/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/llu/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/llu/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/llu/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/llu/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/llu/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/llu/.m2/repository/org/apache/curator/curator-client/2.6.0/curator-client-2.6.0.jar:/Users/llu/.m2/repository/org/apache/curator/curator-recipes/2.6.0/curator-recipes-2.6.0.jar:/Users/llu/.m2/repository/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar:/Users/llu/.m2/repository/
 

[05/51] [abbrv] hadoop git commit: YARN-3435. AM container to be allocated Appattempt AM container shown as null. Contributed by Bibin A Chundatt

2015-04-06 Thread zhz
YARN-3435. AM container to be allocated Appattempt AM container shown as
null. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96d72118
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96d72118
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96d72118

Branch: refs/heads/HDFS-7285
Commit: 96d72118f5c81aa4e0dca0dc0241fbf1a3fff4d2
Parents: 4b3948e
Author: Xuan xg...@apache.org
Authored: Sun Apr 5 00:40:57 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Sun Apr 5 00:40:57 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96d72118/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 594833d..b1f8913 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -142,6 +142,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3415. Non-AM containers can be counted towards amResourceUsage of a
 Fair Scheduler queue (Zhihai Xu via Sandy Ryza)
 
+YARN-3435. AM container to be allocated Appattempt AM container shown as 
null.
+(Bibin A Chundatt via xgong)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96d72118/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
index 1831920..506e31f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
@@ -209,7 +209,7 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
          "AM Container:",
          appAttempt.getAmContainerId() == null || containers == null
              || !hasAMContainer(appAttemptReport.getAMContainerId(), containers)
-              ? null : root_url("container", appAttempt.getAmContainerId()),
+              ? "N/A" : root_url("container", appAttempt.getAmContainerId()),
          String.valueOf(appAttempt.getAmContainerId()))
      ._("Node:", node)
      ._(
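
The one-line fix swaps a literal null, which the web UI rendered as the text
null, for the placeholder N/A. Reduced to its essence with a hypothetical
helper:

// Hypothetical helper showing the rendering rule after the fix.
final class WebUiText {
  static String amContainerCell(String containerUrl) {
    return containerUrl == null ? "N/A" : containerUrl;  // never print "null"
  }
}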



[10/51] [abbrv] hadoop git commit: Fix Compilation Error in TestAddBlockgroup.java after the merge

2015-04-06 Thread zhz
Fix Compilation Error in TestAddBlockgroup.java after the merge


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dd5fd7b6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dd5fd7b6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dd5fd7b6

Branch: refs/heads/HDFS-7285
Commit: dd5fd7b612241a371d573c60714f0f11de97dbf7
Parents: 7e39e00
Author: Jing Zhao ji...@apache.org
Authored: Sun Feb 8 16:01:03 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:01:04 2015 -0700

--
 .../apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dd5fd7b6/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
index 95133ce..06dfade 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockgroup.java
@@ -26,7 +26,7 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -75,7 +75,7 @@ public class TestAddBlockgroup {
    final Path file1 = new Path("/file1");
 DFSTestUtil.createFile(fs, file1, BLOCKSIZE * 2, REPLICATION, 0L);
 INodeFile file1Node = fsdir.getINode4Write(file1.toString()).asFile();
-BlockInfo[] file1Blocks = file1Node.getBlocks();
+BlockInfoContiguous[] file1Blocks = file1Node.getBlocks();
 assertEquals(2, file1Blocks.length);
 assertEquals(GROUP_SIZE, file1Blocks[0].numNodes());
 assertEquals(HdfsConstants.MAX_BLOCKS_IN_GROUP,



[01/51] [abbrv] hadoop git commit: HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. Contributed by Li Lu.

2015-04-06 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 8525fdc5a -> e6ecbaafd (forced update)


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b3948ea/hadoop-project-dist/pom.xml
--
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 33f5a04..d206593 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -165,8 +165,10 @@
        <activeByDefault>false</activeByDefault>
      </activation>
      <properties>
-        <jdiff.stable.api>0.20.2</jdiff.stable.api>
+        <jdiff.stable.api>2.6.0</jdiff.stable.api>
        <jdiff.stability>-unstable</jdiff.stability>
+        <!-- Commented out for HADOOP-11776 -->
+        <!-- Uncomment <param name="${jdiff.compatibility}"/> in javadoc doclet if compatibility is not empty -->
        <jdiff.compatibility></jdiff.compatibility>
        <jdiff.javadoc.maxmemory>512m</jdiff.javadoc.maxmemory>
      </properties>
@@ -227,6 +229,14 @@
                    <outputDirectory>${project.build.directory}</outputDirectory>
                    <destFileName>hadoop-annotations.jar</destFileName>
                  </artifactItem>
+                  <artifactItem>
+                    <groupId>xerces</groupId>
+                    <artifactId>xercesImpl</artifactId>
+                    <version>${xerces.jdiff.version}</version>
+                    <overWrite>false</overWrite>
+                    <outputDirectory>${project.build.directory}</outputDirectory>
+                    <destFileName>xerces.jar</destFileName>
+                  </artifactItem>
                </artifactItems>
              </configuration>
            </execution>
@@ -276,7 +286,7 @@
                  <doclet name="org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet"
                          path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
                    <param name="-apidir" value="${project.build.directory}/site/jdiff/xml"/>
-                    <param name="-apiname" value="hadoop-core ${project.version}"/>
+                    <param name="-apiname" value="${project.name} ${project.version}"/>
                    <param name="${jdiff.stability}"/>
                  </doclet>
                  <packageset dir="${basedir}/src/main/java"/>
@@ -289,17 +299,17 @@
                   sourceFiles="${basedir}/dev-support/jdiff/Null.java"
                   maxmemory="${jdiff.javadoc.maxmemory}">
                  <doclet name="org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet"
-                          path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar">
-                    <param name="-oldapi" value="hadoop-core ${jdiff.stable.api}"/>
-                    <param name="-newapi" value="hadoop-core ${project.version}"/>
+                          path="${project.build.directory}/hadoop-annotations.jar:${project.build.directory}/jdiff.jar:${project.build.directory}/xerces.jar">
+                    <param name="-oldapi" value="${project.name} ${jdiff.stable.api}"/>
+                    <param name="-newapi" value="${project.name} ${project.version}"/>
                    <param name="-oldapidir" value="${basedir}/dev-support/jdiff"/>
                    <param name="-newapidir" value="${project.build.directory}/site/jdiff/xml"/>
                    <param name="-javadocold"
-                           value="http://hadoop.apache.org/docs/${jdiff.stable.api}/api/"/>
+                           value="http://hadoop.apache.org/docs/r${jdiff.stable.api}/api/"/>
                    <param name="-javadocnew" value="${project.build.directory}/site/api/"/>
                    <param name="-stats"/>
                    <param name="${jdiff.stability}"/>
-                    <param name="${jdiff.compatibility}"/>
+                    <!--param name="${jdiff.compatibility}"/-->
                  </doclet>
                  <classpath>
                    <path refid="maven.compile.classpath"/>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b3948ea/hadoop-project/pom.xml
--
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 739cae0..a59ec06 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -42,6 +42,8 @@
    <!-- These 2 versions are defined here because they are used -->
    <!-- JDIFF generation from embedded ant in the antrun plugin -->
    <jdiff.version>1.0.9</jdiff.version>
+    <!-- Version number for xerces used by JDiff -->
+    <xerces.jdiff.version>2.11.0</xerces.jdiff.version>

    <hadoop.assemblies.version>${project.version}</hadoop.assemblies.version>
 



[42/51] [abbrv] hadoop git commit: HADOOP-11664. Loading predefined EC schemas from configuration. Contributed by Kai Zheng.

2015-04-06 Thread zhz
HADOOP-11664. Loading predefined EC schemas from configuration. Contributed by 
Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5097a48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5097a48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5097a48

Branch: refs/heads/HDFS-7285
Commit: a5097a481b0b8e1cc9f293882b2a5fa60075b6e1
Parents: d3ccfa9
Author: Zhe Zhang z...@apache.org
Authored: Fri Mar 27 14:52:50 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:02 2015 -0700

--
 .../src/main/conf/ecschema-def.xml  |  40 +
 .../hadoop/fs/CommonConfigurationKeys.java  |   5 +
 .../hadoop/io/erasurecode/SchemaLoader.java | 147 +++
 .../hadoop/io/erasurecode/TestSchemaLoader.java |  80 ++
 4 files changed, 272 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5097a48/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
new file mode 100644
index 000..e619485
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0"?>
+
+<!--
+
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+-->
+
+<!--
+Please define your EC schemas here. Note, once these schemas are loaded
+and referenced by EC storage policies, any change to them will be ignored.
+You can modify and remove those not used yet, or add new ones.
+-->
+
+<schemas>
+  <schema name="RS-6-3">
+    <k>6</k>
+    <m>3</m>
+    <codec>RS</codec>
+  </schema>
+  <schema name="RS-10-4">
+    <k>10</k>
+    <m>4</m>
+    <codec>RS</codec>
+  </schema>
+</schemas>
\ No newline at end of file
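
For readers trying out the format above: files like this can be read with
nothing beyond the JDK's DOM API. A self-contained sketch of the file format
only; SchemaLoader's actual parsing logic is in the SchemaLoader.java diff
below:

// Hedged sketch: parse <schema> entries into name -> options maps.
import java.io.File;
import java.util.HashMap;
import java.util.Map;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
import org.w3c.dom.Node;
import org.w3c.dom.NodeList;

final class EcSchemaFileReader {
  /** Maps schema name (e.g. "RS-6-3") to its options (k, m, codec). */
  static Map<String, Map<String, String>> read(File xml) throws Exception {
    Document doc = DocumentBuilderFactory.newInstance()
        .newDocumentBuilder().parse(xml);
    Map<String, Map<String, String>> schemas = new HashMap<>();
    NodeList schemaNodes = doc.getElementsByTagName("schema");
    for (int i = 0; i < schemaNodes.getLength(); i++) {
      Element schema = (Element) schemaNodes.item(i);
      Map<String, String> options = new HashMap<>();
      NodeList children = schema.getChildNodes();
      for (int j = 0; j < children.getLength(); j++) {
        Node child = children.item(j);
        if (child.getNodeType() == Node.ELEMENT_NODE) {
          options.put(child.getNodeName(), child.getTextContent().trim());
        }
      }
      schemas.put(schema.getAttribute("name"), options);
    }
    return schemas;
  }
}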

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5097a48/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 70fea01..af32674 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -141,6 +141,11 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   /** Supported erasure codec classes */
  public static final String IO_ERASURECODE_CODECS_KEY = "io.erasurecode.codecs";
 
+  public static final String IO_ERASURECODE_SCHEMA_FILE_KEY =
+      "io.erasurecode.schema.file";
+  public static final String IO_ERASURECODE_SCHEMA_FILE_DEFAULT =
+      "ecschema-def.xml";
+
   /** Use XOR raw coder when possible for the RS codec */
   public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
       "io.erasurecode.codec.rs.usexor";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5097a48/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
new file mode 100644
index 000..c51ed37
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * 

[09/51] [abbrv] hadoop git commit: HDFS-7339. Allocating and persisting block groups in NameNode. Contributed by Zhe Zhang

2015-04-06 Thread zhz
HDFS-7339. Allocating and persisting block groups in NameNode. Contributed by 
Zhe Zhang

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b2859e6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b2859e6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b2859e6

Branch: refs/heads/HDFS-7285
Commit: 2b2859e68314c626d1cac785c92ad7577425d3c4
Parents: d552fda
Author: Zhe Zhang z...@apache.org
Authored: Fri Jan 30 16:16:26 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:00:42 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  2 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |  4 +
 .../server/blockmanagement/BlockIdManager.java  |  8 +-
 .../SequentialBlockGroupIdGenerator.java| 82 +++
 .../SequentialBlockIdGenerator.java |  6 +-
 .../hdfs/server/namenode/FSDirectory.java   |  8 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 34 +---
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 11 +++
 .../hdfs/server/namenode/TestAddBlockgroup.java | 84 
 9 files changed, 223 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2859e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 610932a..eff457c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -221,6 +221,8 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int DFS_NAMENODE_REPLICATION_INTERVAL_DEFAULT = 3;
   public static final String  DFS_NAMENODE_REPLICATION_MIN_KEY = "dfs.namenode.replication.min";
   public static final int DFS_NAMENODE_REPLICATION_MIN_DEFAULT = 1;
+  public static final String  DFS_NAMENODE_STRIPE_MIN_KEY = "dfs.namenode.stripe.min";
+  public static final int DFS_NAMENODE_STRIPE_MIN_DEFAULT = 1;
   public static final String  DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY = "dfs.namenode.replication.pending.timeout-sec";
   public static final int DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT = -1;
   public static final String  DFS_NAMENODE_REPLICATION_MAX_STREAMS_KEY = "dfs.namenode.replication.max-streams";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2859e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 54c650b..de60b6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -181,4 +181,8 @@ public class HdfsConstants {
   public static final byte WARM_STORAGE_POLICY_ID = 5;
   public static final byte EC_STORAGE_POLICY_ID = 4;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
+
+  public static final byte NUM_DATA_BLOCKS = 3;
+  public static final byte NUM_PARITY_BLOCKS = 2;
+  public static final byte MAX_BLOCKS_IN_GROUP = 16;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b2859e6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index 1c69203..c8b9d20 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -53,10 +53,12 @@ public class 

[17/51] [abbrv] hadoop git commit: HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info. Contributed by Jing Zhao.

2015-04-06 Thread zhz
HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info. Contributed by 
Jing Zhao.

Conflicts:

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e3a48212
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e3a48212
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e3a48212

Branch: refs/heads/HDFS-7285
Commit: e3a48212e2ac6e1638eb06732645b809e32d1d0d
Parents: 958e8c1
Author: Jing Zhao ji...@apache.org
Authored: Tue Feb 10 17:54:10 2015 -0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:05:11 2015 -0700

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |   1 +
 .../server/blockmanagement/BlockCollection.java |  13 +-
 .../server/blockmanagement/BlockIdManager.java  |   7 +-
 .../hdfs/server/blockmanagement/BlockInfo.java  | 339 +
 .../blockmanagement/BlockInfoContiguous.java| 363 +++
 .../BlockInfoContiguousUnderConstruction.java   | 137 +--
 .../blockmanagement/BlockInfoStriped.java   | 179 +
 .../server/blockmanagement/BlockManager.java| 188 +-
 .../hdfs/server/blockmanagement/BlocksMap.java  |  46 +--
 .../CacheReplicationMonitor.java|  10 +-
 .../blockmanagement/DatanodeDescriptor.java |  22 +-
 .../blockmanagement/DatanodeStorageInfo.java|  38 +-
 .../ReplicaUnderConstruction.java   | 119 ++
 .../hdfs/server/namenode/FSDirectory.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  20 +-
 .../hdfs/server/namenode/NamenodeFsck.java  |   3 +-
 .../snapshot/FSImageFormatPBSnapshot.java   |   4 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   4 +-
 .../server/blockmanagement/TestBlockInfo.java   |   6 +-
 .../blockmanagement/TestBlockInfoStriped.java   | 219 +++
 .../blockmanagement/TestBlockManager.java   |   4 +-
 .../blockmanagement/TestReplicationPolicy.java  |   2 +-
 22 files changed, 1122 insertions(+), 606 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3a48212/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index de60b6e..245b630 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -184,5 +184,6 @@ public class HdfsConstants {
 
   public static final byte NUM_DATA_BLOCKS = 3;
   public static final byte NUM_PARITY_BLOCKS = 2;
+  public static final long BLOCK_GROUP_INDEX_MASK = 15;
   public static final byte MAX_BLOCKS_IN_GROUP = 16;
 }
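
These constants encode the block-group ID scheme: with MAX_BLOCKS_IN_GROUP = 16,
the low four bits of a striped block ID (BLOCK_GROUP_INDEX_MASK = 15) give a
member's position within its group, and clearing them yields the group ID. An
illustrative helper, not BlockIdManager code (the branch additionally draws
group IDs from their own sequential generator):

// Illustrative ID arithmetic only; BlockIdManager and
// SequentialBlockGroupIdGenerator handle the real allocation.
final class BlockGroupIdMath {
  static final long BLOCK_GROUP_INDEX_MASK = 15;

  static long groupId(long memberBlockId) {
    return memberBlockId & ~BLOCK_GROUP_INDEX_MASK;  // clear the index bits
  }

  static int indexInGroup(long memberBlockId) {
    return (int) (memberBlockId & BLOCK_GROUP_INDEX_MASK);
  }
}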

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3a48212/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index e9baf85..b14efb4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -39,12 +39,12 @@ public interface BlockCollection {
   public ContentSummary computeContentSummary(BlockStoragePolicySuite bsps);
 
   /**
-   * @return the number of blocks
+   * @return the number of blocks or block groups
*/ 
   public int numBlocks();
 
   /**
-   * Get the blocks.
+   * Get the blocks or block groups.
*/
   public BlockInfoContiguous[] getBlocks();
 
@@ -55,8 +55,8 @@ public interface BlockCollection {
   public long getPreferredBlockSize();
 
   /**
-   * Get block replication for the collection 
-   * @return block replication value
+   * Get block replication for the collection.
+   * @return block replication value. Return 0 if the file is erasure coded.
*/
   public short getBlockReplication();
 
@@ -71,7 +71,7 @@ public interface BlockCollection {
   public String getName();
 
   /**
-   * Set the block at the given index.
+   * Set the block/block-group at the given index.
*/
   public void setBlock(int index, BlockInfoContiguous blk);
 
@@ -79,7 

[34/51] [abbrv] hadoop git commit: HADOOP-11706 Refine a little bit erasure coder API

2015-04-06 Thread zhz
HADOOP-11706 Refine a little bit erasure coder API


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37b917d1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37b917d1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37b917d1

Branch: refs/heads/HDFS-7285
Commit: 37b917d14968792aff4f6cead24c7d2ed4e97a91
Parents: ed82e68
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Mar 18 19:21:37 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:00 2015 -0700

--
 .../io/erasurecode/coder/ErasureCoder.java  |  4 +++-
 .../erasurecode/rawcoder/RawErasureCoder.java   |  4 +++-
 .../hadoop/io/erasurecode/TestCoderBase.java| 17 +---
 .../erasurecode/coder/TestErasureCoderBase.java | 21 +++-
 .../erasurecode/rawcoder/TestJRSRawCoder.java   | 12 +--
 .../erasurecode/rawcoder/TestRawCoderBase.java  |  2 ++
 6 files changed, 31 insertions(+), 29 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37b917d1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
index 68875c0..c5922f3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode.coder;
 
+import org.apache.hadoop.conf.Configurable;
+
 /**
  * An erasure coder to perform encoding or decoding given a group. Generally it
  * involves calculating necessary internal steps according to codec logic. For
@@ -31,7 +33,7 @@ package org.apache.hadoop.io.erasurecode.coder;
  * of multiple coding steps.
  *
  */
-public interface ErasureCoder {
+public interface ErasureCoder extends Configurable {
 
   /**
* Initialize with the important parameters for the code.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37b917d1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
index 91a9abf..9af5b6c 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoder.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode.rawcoder;
 
+import org.apache.hadoop.conf.Configurable;
+
 /**
  * RawErasureCoder is a common interface for {@link RawErasureEncoder} and
  * {@link RawErasureDecoder} as both encoder and decoder share some properties.
@@ -31,7 +33,7 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
  * low level constructs, since it only takes care of the math calculation with
  * a group of byte buffers.
  */
-public interface RawErasureCoder {
+public interface RawErasureCoder extends Configurable {
 
   /**
* Initialize with the important parameters for the code.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37b917d1/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
index 194413a..22fd98d 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/TestCoderBase.java
@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
+import org.apache.hadoop.conf.Configuration;
+
 import java.nio.ByteBuffer;
 import java.util.Arrays;
 import java.util.Random;
 
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertTrue;
 
 /**
@@ -31,6 +32,7 @@ import static org.junit.Assert.assertTrue;
 public abstract class TestCoderBase {
   protected static Random RAND = new Random();
 
+  private 
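
The visible change is that both coder interfaces now extend Configurable, so
callers can hand any coder a Configuration before use. A minimal sketch of a
coder base class under that contract, reusing the initialize signature from
HADOOP-11542 (class and accessor names here are illustrative):

// Minimal sketch: a coder made Configurable via Hadoop's Configured base class.
import org.apache.hadoop.conf.Configured;

abstract class AbstractConfigurableCoder extends Configured {
  private int numDataUnits;
  private int numParityUnits;
  private int chunkSize;

  public void initialize(int numDataUnits, int numParityUnits, int chunkSize) {
    this.numDataUnits = numDataUnits;
    this.numParityUnits = numParityUnits;
    this.chunkSize = chunkSize;
  }

  protected int getNumDataUnits()   { return numDataUnits; }
  protected int getNumParityUnits() { return numParityUnits; }
  protected int getChunkSize()      { return chunkSize; }
}

// Usage: coder.setConf(new org.apache.hadoop.conf.Configuration());
//        coder.initialize(6, 3, 64 * 1024);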

[39/51] [abbrv] hadoop git commit: HDFS-7716. Add a test for BlockGroup support in FSImage. Contributed by Takuya Fukudome

2015-04-06 Thread zhz
HDFS-7716. Add a test for BlockGroup support in FSImage.  Contributed by Takuya 
Fukudome


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3ccfa98
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3ccfa98
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3ccfa98

Branch: refs/heads/HDFS-7285
Commit: d3ccfa987a3b089a7128b6c7ed1dd66e83662398
Parents: a4e6262
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Wed Mar 25 19:01:03 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:02 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  6 ++-
 .../hdfs/server/namenode/TestFSImage.java   | 53 
 2 files changed, 58 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3ccfa98/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 2ef8527..21e4c03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -1,4 +1,8 @@
   BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
 
 HDFS-7347. Configurable erasure coding policy for individual files and
-directories ( Zhe Zhang via vinayakumarb )
\ No newline at end of file
+directories ( Zhe Zhang via vinayakumarb )
+
+HDFS-7716. Add a test for BlockGroup support in FSImage.
+(Takuya Fukudome via szetszwo)
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3ccfa98/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 71dc978..440f5cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -31,7 +32,12 @@ import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.util.EnumSet;
 
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.junit.Assert;
 
@@ -46,6 +52,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -378,4 +385,50 @@ public class TestFSImage {
   FileUtil.fullyDelete(dfsDir);
 }
   }
+
+  /**
+   * Ensure that FSImage supports BlockGroup.
+   */
+  @Test
+  public void testSupportBlockGroup() throws IOException {
+final short GROUP_SIZE = HdfsConstants.NUM_DATA_BLOCKS +
+HdfsConstants.NUM_PARITY_BLOCKS;
+final int BLOCK_SIZE = 8 * 1024 * 1024;
+Configuration conf = new HdfsConfiguration();
+conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+      fs.setStoragePolicy(new Path("/"), HdfsConstants.EC_STORAGE_POLICY_NAME);
+      Path file = new Path("/striped");
+  FSDataOutputStream out = fs.create(file);
+  byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE);
+  out.write(bytes);
+  out.close();
+
+  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+  fs.saveNamespace();
+  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+  cluster.restartNameNodes();
+  fs = cluster.getFileSystem();
+  

[22/51] [abbrv] hadoop git commit: HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai Zheng

2015-04-06 Thread zhz
HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0de9b48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0de9b48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0de9b48

Branch: refs/heads/HDFS-7285
Commit: d0de9b48f001cfae3fd0b12b3ed77eacf6f63179
Parents: 1af3d5d
Author: drankye kai.zh...@intel.com
Authored: Thu Mar 5 22:51:52 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:20:57 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   4 +
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 203 +++
 .../hadoop/io/erasurecode/TestECSchema.java |  54 +
 3 files changed, 261 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0de9b48/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 7bbacf7..ee42c84 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -12,3 +12,7 @@
 HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng
 ( Kai Zheng )
 
+HADOOP-11643. Define EC schema API for ErasureCodec. Contributed by Kai 
Zheng
+( Kai Zheng )
+
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0de9b48/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
new file mode 100644
index 000..8dc3f45
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -0,0 +1,203 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+import java.util.Collections;
+import java.util.Map;
+
+/**
+ * Erasure coding schema to housekeeper relevant information.
+ */
+public class ECSchema {
+  public static final String NUM_DATA_UNITS_KEY = "k";
+  public static final String NUM_PARITY_UNITS_KEY = "m";
+  public static final String CODEC_NAME_KEY = "codec";
+  public static final String CHUNK_SIZE_KEY = "chunkSize";
+  public static final int DEFAULT_CHUNK_SIZE = 64 * 1024; // 64K
+
+  private String schemaName;
+  private String codecName;
+  private MapString, String options;
+  private int numDataUnits;
+  private int numParityUnits;
+  private int chunkSize;
+
+  /**
+   * Constructor with schema name and provided options. Note the options may
+   * contain additional information for the erasure codec to interpret further.
+   * @param schemaName schema name
+   * @param options schema options
+   */
+  public ECSchema(String schemaName, MapString, String options) {
+    assert (schemaName != null && !schemaName.isEmpty());
+
+this.schemaName = schemaName;
+
+    if (options == null || options.isEmpty()) {
+      throw new IllegalArgumentException("No schema options are provided");
+    }
+
+    String codecName = options.get(CODEC_NAME_KEY);
+    if (codecName == null || codecName.isEmpty()) {
+      throw new IllegalArgumentException("No codec option is provided");
+    }
+
+int dataUnits = 0, parityUnits = 0;
+try {
+  if (options.containsKey(NUM_DATA_UNITS_KEY)) {
+dataUnits = Integer.parseInt(options.get(NUM_DATA_UNITS_KEY));
+  }
+    } catch (NumberFormatException e) {
+      throw new IllegalArgumentException("Option value " +
+          options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+          " is found. It should be an integer");
+    }
+
+try {
+  if 
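
A short usage sketch for the schema API above, built only from the option keys
the class defines; the schema name follows the RS-6-3 convention used elsewhere
in the branch:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.erasurecode.ECSchema;

class ECSchemaExample {
  public static void main(String[] args) {
    Map<String, String> options = new HashMap<>();
    options.put(ECSchema.NUM_DATA_UNITS_KEY, "6");    // k
    options.put(ECSchema.NUM_PARITY_UNITS_KEY, "3");  // m
    options.put(ECSchema.CODEC_NAME_KEY, "RS");
    // chunkSize is optional and falls back to DEFAULT_CHUNK_SIZE (64K).
    ECSchema schema = new ECSchema("RS-6-3", options);
  }
}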

[18/51] [abbrv] hadoop git commit: HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng

2015-04-06 Thread zhz
HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae790470
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae790470
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae790470

Branch: refs/heads/HDFS-7285
Commit: ae7904705a255b96353471bf7f0e0a9586f936aa
Parents: e3a4821
Author: drankye dran...@gmail.com
Authored: Thu Feb 12 21:12:44 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:05:28 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |   4 +
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  |  69 +++
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  |  78 +++
 .../erasurecode/rawcoder/RawErasureCoder.java   |   2 +-
 .../erasurecode/rawcoder/util/GaloisField.java  | 497 +++
 .../io/erasurecode/rawcoder/util/RSUtil.java|  22 +
 .../hadoop/io/erasurecode/TestCoderBase.java|  28 +-
 .../erasurecode/rawcoder/TestJRSRawCoder.java   |  93 
 .../erasurecode/rawcoder/TestRawCoderBase.java  |   5 +-
 .../erasurecode/rawcoder/TestXorRawCoder.java   |   1 -
 10 files changed, 786 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae790470/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 9728f97..7bbacf7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -8,3 +8,7 @@
 
 HADOOP-11541. Raw XOR coder
 ( Kai Zheng )
+
+HADOOP-11542. Raw Reed-Solomon coder in pure Java. Contributed by Kai Zheng
+( Kai Zheng )
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae790470/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
new file mode 100644
index 000..dbb689e
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawDecoder.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+import org.apache.hadoop.io.erasurecode.rawcoder.util.RSUtil;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A raw erasure decoder in RS code scheme in pure Java in case native one
+ * isn't available in some environment. Please always use native 
implementations
+ * when possible.
+ */
+public class JRSRawDecoder extends AbstractRawErasureDecoder {
+  // To describe and calculate the needed Vandermonde matrix
+  private int[] errSignature;
+  private int[] primitivePower;
+
+  @Override
+  public void initialize(int numDataUnits, int numParityUnits, int chunkSize) {
+super.initialize(numDataUnits, numParityUnits, chunkSize);
+    assert (getNumDataUnits() + getNumParityUnits() < RSUtil.GF.getFieldSize());
+
+this.errSignature = new int[getNumParityUnits()];
+this.primitivePower = RSUtil.getPrimitivePower(getNumDataUnits(),
+getNumParityUnits());
+  }
+
+  @Override
+  protected void doDecode(ByteBuffer[] inputs, int[] erasedIndexes,
+  ByteBuffer[] outputs) {
+    for (int i = 0; i < erasedIndexes.length; i++) {
+  errSignature[i] = primitivePower[erasedIndexes[i]];
+  RSUtil.GF.substitute(inputs, outputs[i], primitivePower[i]);
+}
+
+int dataLen = inputs[0].remaining();
+RSUtil.GF.solveVandermondeSystem(errSignature, outputs,
+erasedIndexes.length, dataLen);
+  }
+
+  @Override
+  protected void 
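
A hedged usage sketch for the pure-Java RS decoder. The initialize signature
appears above; the public decode(inputs, erasedIndexes, outputs) entry point is
assumed from the doDecode shape, since the full RawErasureDecoder interface is
not shown in this hunk:

// Buffer wiring is illustrative; real callers come from HDFS striped I/O.
import java.nio.ByteBuffer;
import org.apache.hadoop.io.erasurecode.rawcoder.JRSRawDecoder;

class RsDecodeSketch {
  public static void main(String[] args) {
    int dataUnits = 6, parityUnits = 3, chunkSize = 64 * 1024;
    JRSRawDecoder decoder = new JRSRawDecoder();
    decoder.initialize(dataUnits, parityUnits, chunkSize);

    ByteBuffer[] inputs = new ByteBuffer[dataUnits + parityUnits];
    for (int i = 0; i < inputs.length; i++) {
      inputs[i] = ByteBuffer.allocate(chunkSize);   // surviving units
    }
    int[] erasedIndexes = {0};                      // say unit 0 was lost
    ByteBuffer[] outputs = {ByteBuffer.allocate(chunkSize)};
    decoder.decode(inputs, erasedIndexes, outputs); // assumed entry point
  }
}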

[38/51] [abbrv] hadoop git commit: HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage. Contributed by Hui Zheng.

2015-04-06 Thread zhz
HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage. 
Contributed by Hui Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f62c7110
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f62c7110
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f62c7110

Branch: refs/heads/HDFS-7285
Commit: f62c7110dbae620c45466b4897632d78b3bd37f6
Parents: 7c7ba71
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 23 15:10:10 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:21:01 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  11 +-
 .../hdfs/server/namenode/FSImageFormat.java |  62 ++--
 .../server/namenode/FSImageSerialization.java   |  78 +++---
 .../blockmanagement/TestBlockInfoStriped.java   |  34 +
 .../hdfs/server/namenode/TestFSImage.java   | 148 ++-
 5 files changed, 300 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62c7110/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index cef8318..30b5ee7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import java.io.DataOutput;
+import java.io.IOException;
 
 /**
  * Subclass of {@link BlockInfo}, presenting a block group in erasure coding.
@@ -206,6 +208,13 @@ public class BlockInfoStriped extends BlockInfo {
 return num;
   }
 
+  @Override
+  public void write(DataOutput out) throws IOException {
+out.writeShort(dataBlockNum);
+out.writeShort(parityBlockNum);
+super.write(out);
+  }
+
   /**
* Convert a complete block to an under construction block.
* @return BlockInfoUnderConstruction -  an under construction block.
@@ -215,7 +224,7 @@ public class BlockInfoStriped extends BlockInfo {
 final BlockInfoStripedUnderConstruction ucBlock;
 if(isComplete()) {
   ucBlock = new BlockInfoStripedUnderConstruction(this, getDataBlockNum(),
-  getParityBlockNum(),  s, targets);
+  getParityBlockNum(), s, targets);
   ucBlock.setBlockCollection(getBlockCollection());
 } else {
   // the block is already under construction

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f62c7110/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 2e6e741..ad96863 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -47,13 +47,16 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ 
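
For the legacy (non-protobuf) fsimage, the write() method above prepends the
striping schema (data and parity counts) to the base block fields. Any
reader must consume fields in exactly the same order. A method-level sketch
of such a reader; the field layout of Block.write() and the constructor used
here are assumptions for illustration, not the loader from the patch:

    static BlockInfoStriped readStriped(DataInput in) throws IOException {
      short dataBlockNum = in.readShort();   // written first by write()
      short parityBlockNum = in.readShort();
      long blockId = in.readLong();          // fields from Block.write()
      long numBytes = in.readLong();
      long generationStamp = in.readLong();
      Block b = new Block(blockId, numBytes, generationStamp);
      return new BlockInfoStriped(b, dataBlockNum, parityBlockNum);
    }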

[27/51] [abbrv] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903 and HDFS-7435. Contributed by Zhe Zhang.

2015-04-06 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903 and 
HDFS-7435. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cef8ce48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cef8ce48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cef8ce48

Branch: refs/heads/HDFS-7285
Commit: cef8ce48f199c8342e93193737d720a973b2fca9
Parents: aac871d
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 16 14:27:21 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:20:59 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/DecommissionManager.java | 2 +-
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java| 2 +-
 .../hadoop/hdfs/server/namenode/snapshot/FileDiffList.java  | 3 ++-
 .../src/test/java/org/apache/hadoop/hdfs/TestDecommission.java  | 5 ++---
 .../hadoop/hdfs/server/namenode/TestAddStripedBlocks.java   | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef8ce48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
index 0faf3ad..df31d6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DecommissionManager.java
@@ -536,7 +536,7 @@ public class DecommissionManager {
  */
 private void processBlocksForDecomInternal(
 final DatanodeDescriptor datanode,
-final Iterator<BlockInfoContiguous> it,
+final Iterator<? extends BlockInfo> it,
 final List<BlockInfoContiguous> insufficientlyReplicated,
 boolean pruneSufficientlyReplicated) {
   boolean firstReplicationLog = true;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef8ce48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 0e2441d..69bdc4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2001,7 +2001,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
 }
 
 // Check if the file is already being truncated with the same length
-final BlockInfoContiguous last = file.getLastBlock();
+final BlockInfo last = file.getLastBlock();
if (last != null && last.getBlockUCState() == BlockUCState.UNDER_RECOVERY) {
   final Block truncateBlock
   = ((BlockInfoContiguousUnderConstruction)last).getTruncateBlock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cef8ce48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
index a1263c5..d0248eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/FileDiffList.java
@@ -21,6 +21,7 @@ import java.util.Collections;
 import java.util.List;
 
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
@@ -132,7 +133,7 @@ public class FileDiffList extends
   break;
 }
 // Check if last block is part of truncate recovery
-BlockInfoContiguous lastBlock = file.getLastBlock();
+BlockInfo lastBlock = file.getLastBlock();
 Block 
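
The iterator parameter above is widened from Iterator<BlockInfoContiguous>
to Iterator<? extends BlockInfo> because Java generics are invariant: a
method declared to take Iterator<BlockInfo> would not accept an
Iterator<BlockInfoContiguous>. A self-contained sketch of the rule, with
illustrative names:

    import java.util.Arrays;
    import java.util.Iterator;

    public class WildcardSketch {
      static class BlockInfo {}
      static class BlockInfoContiguous extends BlockInfo {}

      // Accepts an iterator over BlockInfo or any of its subtypes.
      static int count(Iterator<? extends BlockInfo> it) {
        int n = 0;
        while (it.hasNext()) {
          BlockInfo b = it.next();  // reads through the wildcard are safe
          n++;
        }
        return n;
      }

      public static void main(String[] args) {
        Iterator<BlockInfoContiguous> it =
            Arrays.asList(new BlockInfoContiguous()).iterator();
        System.out.println(count(it));  // compiles only with the wildcard
      }
    }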

[16/51] [abbrv] hadoop git commit: HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info. Contributed by Jing Zhao.

2015-04-06 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3a48212/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
index be16a87..fa7f263 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeStorageInfo.java
@@ -24,6 +24,7 @@ import java.util.List;
 import com.google.common.annotations.VisibleForTesting;
 
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage.State;
@@ -80,10 +81,10 @@ public class DatanodeStorageInfo {
   /**
* Iterates over the list of blocks belonging to the data-node.
*/
-  class BlockIterator implements Iterator<BlockInfoContiguous> {
-private BlockInfoContiguous current;
+  class BlockIterator implements Iterator<BlockInfo> {
+private BlockInfo current;
 
-BlockIterator(BlockInfoContiguous head) {
+BlockIterator(BlockInfo head) {
   this.current = head;
 }
 
@@ -91,8 +92,8 @@ public class DatanodeStorageInfo {
   return current != null;
 }
 
-public BlockInfoContiguous next() {
-  BlockInfoContiguous res = current;
+public BlockInfo next() {
+  BlockInfo res = current;
   current = current.getNext(current.findStorageInfo(DatanodeStorageInfo.this));
   return res;
 }
@@ -112,7 +113,7 @@ public class DatanodeStorageInfo {
   private volatile long remaining;
   private long blockPoolUsed;
 
-  private volatile BlockInfoContiguous blockList = null;
+  private volatile BlockInfo blockList = null;
   private int numBlocks = 0;
 
   // The ID of the last full block report which updated this storage.
@@ -226,7 +227,7 @@ public class DatanodeStorageInfo {
 return blockPoolUsed;
   }
 
-  public AddBlockResult addBlock(BlockInfoContiguous b) {
+  public AddBlockResult addBlock(BlockInfo b, Block reportedBlock) {
 // First check whether the block belongs to a different storage
 // on the same DN.
 AddBlockResult result = AddBlockResult.ADDED;
@@ -245,13 +246,21 @@ public class DatanodeStorageInfo {
 }
 
 // add to the head of the data-node list
-b.addStorage(this);
+b.addStorage(this, reportedBlock);
+insertToList(b);
+return result;
+  }
+
+  AddBlockResult addBlock(BlockInfoContiguous b) {
+return addBlock(b, b);
+  }
+
+  public void insertToList(BlockInfo b) {
 blockList = b.listInsert(blockList, this);
 numBlocks++;
-return result;
   }
 
-  public boolean removeBlock(BlockInfoContiguous b) {
+  public boolean removeBlock(BlockInfo b) {
 blockList = b.listRemove(blockList, this);
 if (b.removeStorage(this)) {
   numBlocks--;
@@ -265,16 +274,15 @@ public class DatanodeStorageInfo {
 return numBlocks;
   }
   
-  Iterator<BlockInfoContiguous> getBlockIterator() {
+  Iterator<BlockInfo> getBlockIterator() {
 return new BlockIterator(blockList);
-
   }
 
   /**
* Move block to the head of the list of blocks belonging to the data-node.
* @return the index of the head of the blockList
*/
-  int moveBlockToHead(BlockInfoContiguous b, int curIndex, int headIndex) {
+  int moveBlockToHead(BlockInfo b, int curIndex, int headIndex) {
 blockList = b.moveBlockToHead(blockList, this, curIndex, headIndex);
 return curIndex;
   }
@@ -284,7 +292,7 @@ public class DatanodeStorageInfo {
* @return the head of the blockList
*/
   @VisibleForTesting
-  BlockInfoContiguous getBlockListHeadForTesting(){
+  BlockInfo getBlockListHeadForTesting(){
 return blockList;
   }
 
@@ -371,6 +379,6 @@ public class DatanodeStorageInfo {
   }
 
   static enum AddBlockResult {
-ADDED, REPLACED, ALREADY_EXIST;
+ADDED, REPLACED, ALREADY_EXIST
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e3a48212/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicaUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicaUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicaUnderConstruction.java
new file mode 100644
index 0000000..f4600cb7
--- /dev/null
+++ 
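
The blockList manipulation above (listInsert, listRemove, moveBlockToHead)
implements an intrusive linked list: the link pointers live inside the
blocks themselves, one per storage, so a block can sit on several storages'
lists at once without wrapper nodes. A simplified single-list sketch of the
insert operation, with illustrative names rather than the HDFS classes:

    public class IntrusiveListSketch {
      static class Node {
        Node next;  // the link lives inside the element itself

        // Insert b at the head and return the new head, mirroring the
        // "blockList = b.listInsert(blockList, this)" shape above.
        static Node listInsert(Node head, Node b) {
          b.next = head;
          return b;
        }
      }

      public static void main(String[] args) {
        Node head = null;
        head = Node.listInsert(head, new Node());
        head = Node.listInsert(head, new Node());
        System.out.println(head.next != null);  // true: two nodes linked
      }
    }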

[03/51] [abbrv] hadoop git commit: HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. Contributed by Li Lu.

2015-04-06 Thread zhz
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b3948ea/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.6.0.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.6.0.xml
 
b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.6.0.xml
new file mode 100644
index 0000000..b105253
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.6.0.xml
@@ -0,0 +1,45596 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Mon Mar 30 15:27:52 PDT 2015 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-core 2.6.0"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet 
org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet 
-docletpath 
/Users/llu/hadoop-common/hadoop-common-project/hadoop-common/target/hadoop-annotations.jar:/Users/llu/hadoop-common/hadoop-common-project/hadoop-common/target/jdiff.jar
 -verbose -classpath 
/Users/llu/hadoop-common/hadoop-common-project/hadoop-common/target/classes:/Users/llu/hadoop-common/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.6.0.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_67.jdk/Contents/Home/lib/tools.jar:/Users/llu/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/llu/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/llu/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/llu/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/llu/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/llu/.m2/repository/common
 
s-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/llu/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/llu/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/llu/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/llu/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/llu/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/llu/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/llu/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/
 
Users/llu/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/llu/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/llu/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/llu/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/llu/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/llu/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/llu/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/llu/.m2/repository/commons-conf
 
iguration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/llu/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-api/1.7.5/slf4j-api-1.7.5.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/llu/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/llu/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/llu/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/llu/.m2/repository/org/apache/ant/ant/1.8.1/ant-1.8.1.jar:/Users/llu/.m2/repository/org/apache/ant/ant-launcher/1.8.1/
 

[07/51] [abbrv] hadoop git commit: HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl lock for a very long time (sinago via cmccabe)

2015-04-06 Thread zhz
HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl lock 
for a very long time (sinago via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/28bebc81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/28bebc81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/28bebc81

Branch: refs/heads/HDFS-7285
Commit: 28bebc81db8bb6d1bc2574de7564fe4c595cfe09
Parents: 53959e6
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Apr 6 08:54:46 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon Apr 6 08:56:52 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 77 +---
 2 files changed, 52 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/28bebc81/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6fafec8..52325a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1379,6 +1379,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8051. FsVolumeList#addVolume should release volume reference if not
 put it into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)
 
+HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
+lock for a very long time (sinago via cmccabe)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/28bebc81/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index f15f649..6bcbe5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1412,38 +1412,59 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override // FsDatasetSpi
-  public synchronized ReplicaHandler createTemporary(
+  public ReplicaHandler createTemporary(
   StorageType storageType, ExtendedBlock b) throws IOException {
-ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
-if (replicaInfo != null) {
-  if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
-   && replicaInfo instanceof ReplicaInPipeline) {
-// Stop the previous writer
-((ReplicaInPipeline)replicaInfo)
-  .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
-invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
-  } else {
-throw new ReplicaAlreadyExistsException("Block " + b +
-" already exists in state " + replicaInfo.getState() +
-" and thus cannot be created.");
+long startTimeMs = Time.monotonicNow();
+long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
+ReplicaInfo lastFoundReplicaInfo = null;
+do {
+  synchronized (this) {
+ReplicaInfo currentReplicaInfo =
+volumeMap.get(b.getBlockPoolId(), b.getBlockId());
+if (currentReplicaInfo == lastFoundReplicaInfo) {
+  if (lastFoundReplicaInfo != null) {
+invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo });
+  }
+  FsVolumeReference ref =
+  volumes.getNextVolume(storageType, b.getNumBytes());
+  FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
+  // create a temporary file to hold block in the designated volume
+  File f;
+  try {
+f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
+  } catch (IOException e) {
+IOUtils.cleanup(null, ref);
+throw e;
+  }
+  ReplicaInPipeline newReplicaInfo =
+  new ReplicaInPipeline(b.getBlockId(), b.getGenerationStamp(), v,
+  f.getParentFile(), 0);
+  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
+  return new ReplicaHandler(newReplicaInfo, ref);
+} else {
+  if (!(currentReplicaInfo.getGenerationStamp() < b
+  .getGenerationStamp() && currentReplicaInfo 
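
The rewrite above bounds how long the FsDatasetImpl lock is held: the map
lookup and the replica creation stay inside synchronized, while the slow
stopWriter() wait runs with the lock released, and the loop re-checks
whether the map changed in the meantime. The general shape of that pattern,
as a self-contained sketch with assumed helper names:

    public class LockScopeSketch {
      private final Object lock = new Object();
      private Object current;  // stands in for the volumeMap entry

      Object createTemporary() throws InterruptedException {
        Object last = null;
        for (;;) {
          Object found;
          synchronized (lock) {     // short critical section
            found = current;
            if (found == last) {    // unchanged since the last check
              Object created = new Object();
              current = created;
              return created;
            }
          }
          last = found;
          stopWriter(found);        // slow wait, outside the lock
        }
      }

      void stopWriter(Object replica) throws InterruptedException {
        Thread.sleep(1);            // placeholder for the real wait
      }
    }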

[15/51] [abbrv] hadoop git commit: HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding (Kai Zheng via umamahesh)

2015-04-06 Thread zhz
HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding (Kai 
Zheng via umamahesh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7175f1cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7175f1cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7175f1cc

Branch: refs/heads/HDFS-7285
Commit: 7175f1cc0b9c8eb88cf7b0285c127e8c11045245
Parents: dd5fd7b
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Thu Jan 29 14:15:13 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:01:05 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  4 +
 .../apache/hadoop/io/erasurecode/ECChunk.java   | 82 +
 .../rawcoder/AbstractRawErasureCoder.java   | 63 +
 .../rawcoder/AbstractRawErasureDecoder.java | 93 
 .../rawcoder/AbstractRawErasureEncoder.java | 93 
 .../erasurecode/rawcoder/RawErasureCoder.java   | 78 
 .../erasurecode/rawcoder/RawErasureDecoder.java | 55 
 .../erasurecode/rawcoder/RawErasureEncoder.java | 54 
 8 files changed, 522 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7175f1cc/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
new file mode 100644
index 0000000..8ce5a89
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -0,0 +1,4 @@
+  BREAKDOWN OF HADOOP-11264 SUBTASKS AND RELATED JIRAS (Common part of 
HDFS-7285)
+
+HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding
+(Kai Zheng via umamahesh)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7175f1cc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
new file mode 100644
index 0000000..f84eb11
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -0,0 +1,82 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode;
+
+import java.nio.ByteBuffer;
+
+/**
+ * A wrapper for ByteBuffer or bytes array for an erasure code chunk.
+ */
+public class ECChunk {
+
+  private ByteBuffer chunkBuffer;
+
+  /**
+   * Wrapping a ByteBuffer
+   * @param buffer
+   */
+  public ECChunk(ByteBuffer buffer) {
+this.chunkBuffer = buffer;
+  }
+
+  /**
+   * Wrapping a bytes array
+   * @param buffer
+   */
+  public ECChunk(byte[] buffer) {
+this.chunkBuffer = ByteBuffer.wrap(buffer);
+  }
+
+  /**
+   * Convert to ByteBuffer
+   * @return ByteBuffer
+   */
+  public ByteBuffer getBuffer() {
+return chunkBuffer;
+  }
+
+  /**
+   * Convert an array of this chunks to an array of ByteBuffers
+   * @param chunks
+   * @return an array of ByteBuffers
+   */
+  public static ByteBuffer[] toBuffers(ECChunk[] chunks) {
+ByteBuffer[] buffers = new ByteBuffer[chunks.length];
+
+for (int i = 0; i < chunks.length; i++) {
+  buffers[i] = chunks[i].getBuffer();
+}
+
+return buffers;
+  }
+
+  /**
+   * Convert an array of this chunks to an array of byte array
+   * @param chunks
+   * @return an array of byte array
+   */
+  public static byte[][] toArray(ECChunk[] chunks) {
+byte[][] bytesArr = new byte[chunks.length][];
+
+for (int i = 0; i < chunks.length; i++) {
+  bytesArr[i] = chunks[i].getBuffer().array();
+}
+
+return bytesArr;
+  }
+}
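
A short usage sketch of the ECChunk API introduced above; the buffer sizes
are arbitrary and chosen only for illustration:

    import java.nio.ByteBuffer;
    import org.apache.hadoop.io.erasurecode.ECChunk;

    public class ECChunkSketch {
      public static void main(String[] args) {
        ECChunk[] chunks = new ECChunk[2];
        chunks[0] = new ECChunk(ByteBuffer.allocate(16));  // wrap a buffer
        chunks[1] = new ECChunk(new byte[16]);             // wrap an array
        ByteBuffer[] buffers = ECChunk.toBuffers(chunks);  // unwrapped views
        byte[][] arrays = ECChunk.toArray(chunks);
        System.out.println(buffers.length + " / " + arrays[0].length);
      }
    }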


[06/51] [abbrv] hadoop git commit: TestFairScheduler.testContinuousScheduling fails Intermittently. Contributed by Zhihai Xu.

2015-04-06 Thread zhz
TestFairScheduler.testContinuousScheduling fails Intermittently. Contributed by 
Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/53959e69
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/53959e69
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/53959e69

Branch: refs/heads/HDFS-7285
Commit: 53959e69f7ef4442324627fbfb159012968a2092
Parents: 96d7211
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon Apr 6 20:13:21 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon Apr 6 20:19:13 2015 +0900

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../resourcemanager/scheduler/fair/TestFairScheduler.java | 7 ++-
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/53959e69/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index b1f8913..695c4a6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -145,6 +145,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3435. AM container to be allocated Appattempt AM container shown as 
null.
 (Bibin A Chundatt via xgong)
 
+YARN-2666. TestFairScheduler.testContinuousScheduling fails Intermittently.
+(Zhihai Xu via ozawa)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/53959e69/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index b5bfb8c..98877e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -3922,12 +3922,9 @@ public class TestFairScheduler extends FairSchedulerTestBase {
 createResourceRequest(1024, 1, ResourceRequest.ANY, 2, 1, true);
 ask.clear();
 ask.add(request);
+scheduler.stop();
+scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null, null);
-
-// Wait until app gets resources
-while (app.getCurrentConsumption()
-.equals(Resources.createResource(1024, 1))) { }
-
+scheduler.continuousSchedulingAttempt();
 Assert.assertEquals(2048, app.getCurrentConsumption().getMemory());
 Assert.assertEquals(2, app.getCurrentConsumption().getVirtualCores());
 



[12/51] [abbrv] hadoop git commit: Added the missed entry for commit of HADOOP-11541

2015-04-06 Thread zhz
Added the missed entry for commit of HADOOP-11541


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/958e8c1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/958e8c1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/958e8c1e

Branch: refs/heads/HDFS-7285
Commit: 958e8c1eabbbf05da16dc037077791777924dc3d
Parents: f50b3df
Author: drankye dran...@gmail.com
Authored: Mon Feb 9 22:04:08 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:01:05 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 5 -
 1 file changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/958e8c1e/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 2124800..9728f97 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -4,4 +4,7 @@
 (Kai Zheng via umamahesh)
 
 HADOOP-11534. Minor improvements for raw erasure coders
-( Kai Zheng via vinayakumarb )
\ No newline at end of file
+( Kai Zheng via vinayakumarb )
+
+HADOOP-11541. Raw XOR coder
+( Kai Zheng )



[14/51] [abbrv] hadoop git commit: HADOOP-11534. Minor improvements for raw erasure coders ( Contributed by Kai Zheng )

2015-04-06 Thread zhz
HADOOP-11534. Minor improvements for raw erasure coders ( Contributed by Kai 
Zheng )


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b431203a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b431203a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b431203a

Branch: refs/heads/HDFS-7285
Commit: b431203a8b2bb6d480347d51a28b9e3a4f23e322
Parents: 7175f1c
Author: Vinayakumar B vinayakuma...@intel.com
Authored: Mon Feb 2 14:39:53 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 10:01:05 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt   |  5 -
 .../org/apache/hadoop/io/erasurecode/ECChunk.java| 15 +--
 .../rawcoder/AbstractRawErasureCoder.java| 12 ++--
 3 files changed, 23 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b431203a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 8ce5a89..2124800 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -1,4 +1,7 @@
   BREAKDOWN OF HADOOP-11264 SUBTASKS AND RELATED JIRAS (Common part of 
HDFS-7285)
 
 HADOOP-11514. Raw Erasure Coder API for concrete encoding and decoding
-(Kai Zheng via umamahesh)
\ No newline at end of file
+(Kai Zheng via umamahesh)
+
+HADOOP-11534. Minor improvements for raw erasure coders
+( Kai Zheng via vinayakumarb )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b431203a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
index f84eb11..01e8f35 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECChunk.java
@@ -66,15 +66,26 @@ public class ECChunk {
   }
 
   /**
-   * Convert an array of this chunks to an array of byte array
+   * Convert an array of this chunks to an array of byte array.
+   * Note the chunk buffers are not affected.
* @param chunks
* @return an array of byte array
*/
   public static byte[][] toArray(ECChunk[] chunks) {
 byte[][] bytesArr = new byte[chunks.length][];
 
+ByteBuffer buffer;
for (int i = 0; i < chunks.length; i++) {
-  bytesArr[i] = chunks[i].getBuffer().array();
+  buffer = chunks[i].getBuffer();
+  if (buffer.hasArray()) {
+bytesArr[i] = buffer.array();
+  } else {
+bytesArr[i] = new byte[buffer.remaining()];
+// Avoid affecting the original one
+buffer.mark();
+buffer.get(bytesArr[i]);
+buffer.reset();
+  }
 }
 
 return bytesArr;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b431203a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
index 474542b..74d2ab6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/AbstractRawErasureCoder.java
@@ -24,26 +24,26 @@ package org.apache.hadoop.io.erasurecode.rawcoder;
  */
 public abstract class AbstractRawErasureCoder implements RawErasureCoder {
 
-  private int dataSize;
-  private int paritySize;
+  private int numDataUnits;
+  private int numParityUnits;
   private int chunkSize;
 
   @Override
   public void initialize(int numDataUnits, int numParityUnits,
  int chunkSize) {
-this.dataSize = numDataUnits;
-this.paritySize = numParityUnits;
+this.numDataUnits = numDataUnits;
+this.numParityUnits = numParityUnits;
 this.chunkSize = chunkSize;
   }
 
   @Override
   public int getNumDataUnits() {
-return dataSize;
+return numDataUnits;
   }
 
   @Override
   public int 
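
The mark/get/reset sequence in the toArray() change above copies a buffer's
remaining bytes without disturbing its position, which matters for direct
buffers that expose no backing array. The idiom in isolation, as a
standalone sketch:

    import java.nio.ByteBuffer;

    public class BufferCopySketch {
      static byte[] copyRemaining(ByteBuffer buffer) {
        byte[] bytes = new byte[buffer.remaining()];
        buffer.mark();      // remember the current position
        buffer.get(bytes);  // the bulk get advances the position...
        buffer.reset();     // ...so restore it before returning
        return bytes;
      }

      public static void main(String[] args) {
        ByteBuffer direct = ByteBuffer.allocateDirect(8); // hasArray() false
        System.out.println(copyRemaining(direct).length); // prints 8
      }
    }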

[04/51] [abbrv] hadoop git commit: HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. Contributed by Li Lu.

2015-04-06 Thread zhz
HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b3948ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b3948ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b3948ea

Branch: refs/heads/HDFS-7285
Commit: 4b3948ea365db07df7a9369a271009fafd1ba8f5
Parents: 5370e71
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Sat Apr 4 13:52:01 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Sat Apr 4 13:52:01 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +
 .../jdiff/Apache_Hadoop_Common_2.6.0.xml| 45596 +
 .../jdiff/Apache_Hadoop_HDFS_2.6.0.xml  | 19520 +++
 hadoop-project-dist/pom.xml |24 +-
 hadoop-project/pom.xml  | 2 +
 5 files changed, 65137 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b3948ea/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index fd926aa..f52e09f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1205,6 +1205,8 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11377. Added Null.java without which jdiff completely flops. 
(Tsuyoshi
 Ozawa via vinodkv)
+
+HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. (Li Lu via 
vinodkv)
 
 Release 2.6.1 - UNRELEASED
 



[40/50] [abbrv] hadoop git commit: HDFS-8051. FsVolumeList#addVolume should release volume reference if not put it into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)

2015-04-06 Thread zjshen
HDFS-8051. FsVolumeList#addVolume should release volume reference if not put it 
into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e1ffb3ea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e1ffb3ea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e1ffb3ea

Branch: refs/heads/YARN-2928
Commit: e1ffb3ea615e830b4215fafd0b2c1d05a50d4b0c
Parents: 5864e88
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Fri Apr 3 16:34:23 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:16 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../datanode/fsdataset/impl/FsVolumeList.java|  5 +
 .../fsdataset/impl/TestFsVolumeList.java | 19 +++
 3 files changed, 27 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ffb3ea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2d399a4..6fafec8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1376,6 +1376,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7996. After swapping a volume, BlockReceiver reports
 ReplicaNotFoundException (Lei (Eddy) Xu via Colin P. McCabe)
 
+HDFS-8051. FsVolumeList#addVolume should release volume reference if not
+put it into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ffb3ea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
index 4fddfb9..d87595c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeList.java
@@ -41,6 +41,7 @@ import 
org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.VolumeChoosingPolicy;
 import org.apache.hadoop.hdfs.server.datanode.BlockScanner;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.Time;
 
@@ -292,6 +293,10 @@ class FsVolumeList {
 }
 if (blockScanner != null) {
   blockScanner.addVolumeScanner(ref);
+} else {
+  // If the volume is not put into a volume scanner, it does not need to
+  // hold the reference.
+  IOUtils.cleanup(FsDatasetImpl.LOG, ref);
 }
 // If the volume is used to replace a failed volume, it needs to reset the
 // volume failure info for this volume.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e1ffb3ea/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
index 46189ba..eccff89 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestFsVolumeList.java
@@ -35,6 +35,7 @@ import java.util.Collections;
 import java.util.List;
 
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.fail;
 import static org.mockito.Mockito.mock;
 
 public class TestFsVolumeList {
@@ -101,4 +102,22 @@ public class TestFsVolumeList {
 // checkDirs() should ignore the 2nd volume since it is closed.
 volumeList.checkDirs();
   }
+
+  @Test
+  public void testReleaseVolumeRefIfNoBlockScanner() throws IOException {
+FsVolumeList volumeList = new FsVolumeList(
+Collections.<VolumeFailureInfo>emptyList(), null, blockChooser);
+File volDir = new File(baseDir, 
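
The fix above encodes an ownership rule for counted references: whichever
path does not hand the reference to a long-lived owner (here, the volume
scanner) must release it, otherwise the count never drops and the volume can
never be removed. A generic sketch of the rule with assumed types:

    import java.io.Closeable;
    import java.io.IOException;

    public class RefOwnershipSketch {
      interface Ref extends Closeable {}

      void addVolume(Ref ref, boolean scannerTakesIt) throws IOException {
        if (scannerTakesIt) {
          handOff(ref);  // the scanner now owns the reference, closes it later
        } else {
          ref.close();   // no one took ownership, so release it here
        }
      }

      void handOff(Ref ref) {}
    }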

[36/50] [abbrv] hadoop git commit: HADOOP-11800. Clean up some test methods in TestCodec.java. Contributed by Brahma Reddy Battula.

2015-04-06 Thread zjshen
HADOOP-11800. Clean up some test methods in TestCodec.java. Contributed by 
Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff35b525
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff35b525
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff35b525

Branch: refs/heads/YARN-2928
Commit: ff35b525ac2a2c92d4d2e366deee80a9fb48f576
Parents: aae48de
Author: Akira Ajisaka aajis...@apache.org
Authored: Sat Apr 4 00:32:18 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:15 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/test/java/org/apache/hadoop/io/compress/TestCodec.java   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff35b525/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 495c42d..7dcf8c1 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -494,6 +494,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11760. Fix typo of javadoc in DistCp. (Brahma Reddy Battula via
 ozawa).
 
+HADOOP-11800. Clean up some test methods in TestCodec.java.
+(Brahma Reddy Battula via aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff35b525/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
index 7246bf5..5d4af91 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodec.java
@@ -827,7 +827,7 @@ public class TestCodec {
 br.close();
   }
 
-  public void testGzipCodecWrite(boolean useNative) throws IOException {
+  private void testGzipCodecWrite(boolean useNative) throws IOException {
 // Create a gzipped file using a compressor from the CodecPool,
 // and try to read it back via the regular GZIPInputStream.
 
@@ -894,7 +894,7 @@ public class TestCodec {
   public void testGzipNativeCodecWrite() throws IOException {
 testGzipCodecWrite(true);
   }
-
+  @Test
   public void testCodecPoolAndGzipDecompressor() {
 // BuiltInZlibInflater should not be used as the GzipCodec decompressor.
 // Assert that this is the case.



[26/50] [abbrv] hadoop git commit: HDFS-7888. Change DFSOutputStream and DataStreamer for convenience of subclassing. Contributed by Li Bo

2015-04-06 Thread zjshen
HDFS-7888. Change DFSOutputStream and DataStreamer for convenience of 
subclassing. Contributed by Li Bo


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b98f637c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b98f637c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b98f637c

Branch: refs/heads/YARN-2928
Commit: b98f637c1843b1f79016fd86244ecc68039ba54a
Parents: b4c3ce1
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Apr 2 10:59:26 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:13 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 116 ++-
 .../org/apache/hadoop/hdfs/DataStreamer.java|  13 +--
 3 files changed, 69 insertions(+), 63 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b98f637c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 80d958d..0c66309 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -376,6 +376,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..).
 (Walter Su via wang)
 
+HDFS-7888. Change DFSOutputStream and DataStreamer for convenience of
+subclassing. (Li Bo via szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b98f637c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index c88639d..f6733e3 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InterruptedIOException;
-import java.net.Socket;
 import java.nio.channels.ClosedChannelException;
 import java.util.EnumSet;
 import java.util.concurrent.atomic.AtomicReference;
@@ -95,29 +94,29 @@ public class DFSOutputStream extends FSOutputSummer
   static CryptoProtocolVersion[] SUPPORTED_CRYPTO_VERSIONS =
   CryptoProtocolVersion.supported();
 
-  private final DFSClient dfsClient;
-  private final ByteArrayManager byteArrayManager;
+  protected final DFSClient dfsClient;
+  protected final ByteArrayManager byteArrayManager;
   // closed is accessed by different threads under different locks.
-  private volatile boolean closed = false;
-
-  private final String src;
-  private final long fileId;
-  private final long blockSize;
-  private final int bytesPerChecksum;
-
-  private DFSPacket currentPacket = null;
-  private DataStreamer streamer;
-  private int packetSize = 0; // write packet size, not including the header.
-  private int chunksPerPacket = 0;
-  private long lastFlushOffset = 0; // offset when flush was invoked
+  protected volatile boolean closed = false;
+
+  protected final String src;
+  protected final long fileId;
+  protected final long blockSize;
+  protected final int bytesPerChecksum;
+
+  protected DFSPacket currentPacket = null;
+  protected DataStreamer streamer;
+  protected int packetSize = 0; // write packet size, not including the header.
+  protected int chunksPerPacket = 0;
+  protected long lastFlushOffset = 0; // offset when flush was invoked
   private long initialFileSize = 0; // at time of file open
   private final short blockReplication; // replication factor of file
-  private boolean shouldSyncBlock = false; // force blocks to disk upon close
-  private final AtomicReference<CachingStrategy> cachingStrategy;
+  protected boolean shouldSyncBlock = false; // force blocks to disk upon close
+  protected final AtomicReference<CachingStrategy> cachingStrategy;
   private FileEncryptionInfo fileEncryptionInfo;
 
   /** Use {@link ByteArrayManager} to create buffer for non-heartbeat 
packets.*/
-  private DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
+  protected DFSPacket createPacket(int packetSize, int chunksPerPkt, long offsetInBlock,
   long seqno, boolean lastPacketInBlock) throws InterruptedIOException {
 final byte[] buf;
 final int bufferSize = PacketHeader.PKT_MAX_HEADER_LEN + packetSize;
@@ 
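
The private-to-protected changes above are the point of the patch: they turn
DFSOutputStream's packet machinery into overridable hooks so a subclass
(for example, a future striped writer) can specialize packet creation while
reusing the rest. The pattern in miniature, with illustrative classes rather
than the HDFS ones:

    public class HookSketch {
      static class BaseStream {
        protected int packetSize = 64 * 1024;

        protected byte[] createPacket(int size) {  // protected: a hook
          return new byte[size];
        }
      }

      static class StripedStream extends BaseStream {
        @Override
        protected byte[] createPacket(int size) {
          // specialize sizing, then reuse the inherited machinery
          return super.createPacket(Math.min(size, packetSize));
        }
      }

      public static void main(String[] args) {
        System.out.println(new StripedStream().createPacket(1 << 20).length);
      }
    }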

[05/50] [abbrv] hadoop git commit: HDFS-7671. hdfs user guide should point to the common rack awareness doc. Contributed by Kai Sasaki.

2015-04-06 Thread zjshen
HDFS-7671. hdfs user guide should point to the common rack awareness doc. 
Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8af286f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8af286f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8af286f4

Branch: refs/heads/YARN-2928
Commit: 8af286f4262328228c801cf0b1e8e6e49e255d6d
Parents: 6916c77
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Apr 1 00:26:16 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:09 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../hadoop-hdfs/src/site/markdown/HdfsUserGuide.md| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8af286f4/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e8075e6..f3537b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -368,6 +368,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7944. Minor cleanup of BlockPoolManager#getAllNamenodeThreads.
 (Arpit Agarwal)
 
+HDFS-7671. hdfs user guide should point to the common rack awareness doc.
+(Kai Sasaki via aajisaka)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8af286f4/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
index 37fa4be..ffd8532 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HdfsUserGuide.md
@@ -264,7 +264,7 @@ For command usage, see [balancer](./HDFSCommands.html#balancer).
 Rack Awareness
 --
 
-Typically large Hadoop clusters are arranged in racks and network traffic 
between different nodes with in the same rack is much more desirable than 
network traffic across the racks. In addition NameNode tries to place replicas 
of block on multiple racks for improved fault tolerance. Hadoop lets the 
cluster administrators decide which rack a node belongs to through 
configuration variable `net.topology.script.file.name`. When this script is 
configured, each node runs the script to determine its rack id. A default 
installation assumes all the nodes belong to the same rack. This feature and 
configuration is further described in PDF attached to 
[HADOOP-692](https://issues.apache.org/jira/browse/HADOOP-692).
+An HDFS cluster can recognize the topology of the racks where each node is 
placed. It is important to configure this topology in order to optimize data 
capacity and usage. For more detail, please check the [rack 
awareness](../hadoop-common/RackAwareness.html) page in the common documentation.
 
 Safemode
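
Besides the topology script mentioned above, rack awareness can also be
supplied programmatically by pointing net.topology.node.switch.mapping.impl
at a class implementing DNSToSwitchMapping. A minimal illustrative mapping
that derives the rack from a hostname prefix; the naming convention is an
assumption for the example, and the two reload methods are the no-op
defaults:

    import java.util.ArrayList;
    import java.util.List;
    import org.apache.hadoop.net.DNSToSwitchMapping;

    public class PrefixRackMapping implements DNSToSwitchMapping {
      @Override
      public List<String> resolve(List<String> names) {
        List<String> racks = new ArrayList<String>(names.size());
        for (String name : names) {
          int dash = name.indexOf('-');  // e.g. "r12-node03" -> "/r12"
          racks.add(dash > 0 ? "/" + name.substring(0, dash)
                             : "/default-rack");
        }
        return racks;
      }

      @Override
      public void reloadCachedMappings() {}

      @Override
      public void reloadCachedMappings(List<String> names) {}
    }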
 



[35/50] [abbrv] hadoop git commit: HDFS-7811. Avoid recursive call getStoragePolicyID in INodeFile#computeQuotaUsage. Contributed by Xiaoyu Yao and Jing Zhao.

2015-04-06 Thread zjshen
HDFS-7811. Avoid recursive call getStoragePolicyID in 
INodeFile#computeQuotaUsage. Contributed by Xiaoyu Yao and Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aae48dea
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aae48dea
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aae48dea

Branch: refs/heads/YARN-2928
Commit: aae48deac10c1ef26504997ae57d298bd6d6fd8d
Parents: c134f99
Author: Jing Zhao ji...@apache.org
Authored: Thu Apr 2 21:20:30 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:15 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hadoop/hdfs/server/namenode/FSImage.java| 14 +---
 .../hadoop/hdfs/server/namenode/INode.java  | 29 +++--
 .../hdfs/server/namenode/INodeDirectory.java| 34 +---
 .../hadoop/hdfs/server/namenode/INodeFile.java  | 13 
 .../hadoop/hdfs/server/namenode/INodeMap.java   |  4 +--
 .../hdfs/server/namenode/INodeReference.java| 15 +
 .../hdfs/server/namenode/INodeSymlink.java  |  2 +-
 .../snapshot/DirectoryWithSnapshotFeature.java  |  7 ++--
 9 files changed, 84 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aae48dea/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 85c013e..04eac75 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -877,6 +877,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8008. Support client-side back off when the datanodes are congested.
 (wheat9)
 
+HDFS-7811. Avoid recursive call getStoragePolicyID in
+INodeFile#computeQuotaUsage. (Xiaoyu Yao and jing9)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aae48dea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index 7454850..d62b952 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -856,23 +856,27 @@ public class FSImage implements Closeable {
*/
   static void updateCountForQuota(BlockStoragePolicySuite bsps,
   INodeDirectory root) {
-updateCountForQuotaRecursively(bsps, root, new QuotaCounts.Builder().build());
+updateCountForQuotaRecursively(bsps, root.getStoragePolicyID(), root,
+new QuotaCounts.Builder().build());
  }
 
   private static void updateCountForQuotaRecursively(BlockStoragePolicySuite bsps,
-  INodeDirectory dir, QuotaCounts counts) {
+  byte blockStoragePolicyId, INodeDirectory dir, QuotaCounts counts) {
 final long parentNamespace = counts.getNameSpace();
 final long parentStoragespace = counts.getStorageSpace();
 final EnumCounters<StorageType> parentTypeSpaces = counts.getTypeSpaces();
 
-dir.computeQuotaUsage4CurrentDirectory(bsps, counts);
+dir.computeQuotaUsage4CurrentDirectory(bsps, blockStoragePolicyId, counts);
 
 for (INode child : dir.getChildrenList(Snapshot.CURRENT_STATE_ID)) {
+  final byte childPolicyId = 
child.getStoragePolicyIDForQuota(blockStoragePolicyId);
   if (child.isDirectory()) {
-updateCountForQuotaRecursively(bsps, child.asDirectory(), counts);
+updateCountForQuotaRecursively(bsps, childPolicyId,
+child.asDirectory(), counts);
   } else {
 // file or symlink: count here to reduce recursive calls.
-child.computeQuotaUsage(bsps, counts, false);
+child.computeQuotaUsage(bsps, childPolicyId, counts, false,
+Snapshot.CURRENT_STATE_ID);
   }
 }
   

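For context, the shape of this optimization: instead of every file re-deriving its storage policy by walking back up to the root (a recursive getStoragePolicyID call per inode), the effective policy is computed once per directory and handed down the traversal. A self-contained toy model of the pattern, with all names hypothetical:

import java.util.ArrayList;
import java.util.List;

class Node {
  byte localPolicy;                  // 0 = unspecified, i.e. inherit
  List<Node> children = new ArrayList<Node>();

  byte effectivePolicy(byte inherited) {
    return localPolicy != 0 ? localPolicy : inherited;
  }
}

public class QuotaWalk {
  static void walk(Node node, byte inheritedPolicy) {
    byte effective = node.effectivePolicy(inheritedPolicy); // O(1), no climb
    // ... account this node's usage under 'effective' here ...
    for (Node child : node.children) {
      walk(child, effective);        // the inherited value rides along
    }
  }

  public static void main(String[] args) {
    Node root = new Node();
    root.localPolicy = 7;            // an explicitly set policy at the root
    root.children.add(new Node());   // the child inherits 7 via the walk
    walk(root, (byte) 0);
  }
}
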
http://git-wip-us.apache.org/repos/asf/hadoop/blob/aae48dea/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index 586cce4..e629441 100644
--- 

[20/50] [abbrv] hadoop git commit: HDFS-8036. Use snapshot path as source when using snapshot diff report in DistCp. Contributed by Jing Zhao.

2015-04-06 Thread zjshen
HDFS-8036. Use snapshot path as source when using snapshot diff report in 
DistCp. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/61487d36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/61487d36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/61487d36

Branch: refs/heads/YARN-2928
Commit: 61487d3642b69381c99b1463fce176063de15726
Parents: 093e092
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 16:50:59 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:12 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../org/apache/hadoop/tools/DistCpSync.java | 21 +-
 .../hadoop/tools/mapred/CopyCommitter.java  |  3 +-
 .../org/apache/hadoop/tools/TestDistCpSync.java | 40 +++-
 4 files changed, 63 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/61487d36/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f265ead..1d9e200 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1345,6 +1345,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7748. Separate ECN flags from the Status in the 
DataTransferPipelineAck.
 (Anu Engineer and Haohui Mai via wheat9)
 
+HDFS-8036. Use snapshot path as source when using snapshot diff report in
+DistCp. (Jing Zhao via wheat9)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61487d36/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
index 26d7eb4..8e71b6f 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/DistCpSync.java
@@ -22,6 +22,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 
 import java.io.IOException;
@@ -86,6 +87,22 @@ class DistCpSync {
 } finally {
   deleteTargetTmpDir(targetFs, tmpDir);
   // TODO: since we have tmp directory, we can support undo with failures
+  // set the source path using the snapshot path
+  inputOptions.setSourcePaths(Arrays.asList(getSourceSnapshotPath(sourceDir,
+  inputOptions.getToSnapshot())));
+}
+  }
+
+  private static String getSnapshotName(String name) {
return Path.CUR_DIR.equals(name) ? "" : name;
+  }
+
+  private static Path getSourceSnapshotPath(Path sourceDir, String 
snapshotName) {
+if (Path.CUR_DIR.equals(snapshotName)) {
+  return sourceDir;
+} else {
+  return new Path(sourceDir,
+  HdfsConstants.DOT_SNAPSHOT_DIR + Path.SEPARATOR + snapshotName);
 }
   }
 
@@ -136,8 +153,10 @@ class DistCpSync {
   static DiffInfo[] getDiffs(DistCpOptions inputOptions,
   DistributedFileSystem fs, Path sourceDir, Path targetDir) {
 try {
+  final String from = getSnapshotName(inputOptions.getFromSnapshot());
+  final String to = getSnapshotName(inputOptions.getToSnapshot());
   SnapshotDiffReport sourceDiff = fs.getSnapshotDiffReport(sourceDir,
-  inputOptions.getFromSnapshot(), inputOptions.getToSnapshot());
+  from, to);
   return DiffInfo.getDiffs(sourceDiff, targetDir);
 } catch (IOException e) {
  DistCp.LOG.warn("Failed to compute snapshot diff on " + sourceDir, e);
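
To restate the helper semantics added above: the snapshot name "." (Path.CUR_DIR) denotes the current state of the directory and resolves to the source directory itself, while a real snapshot name resolves to the immutable .snapshot view. A standalone illustration, using the literal ".snapshot" in place of HdfsConstants.DOT_SNAPSHOT_DIR:

import org.apache.hadoop.fs.Path;

public class SnapshotPathDemo {
  static Path sourceSnapshotPath(Path sourceDir, String snapshotName) {
    if (Path.CUR_DIR.equals(snapshotName)) {  // "."
      return sourceDir;                       // current state, no snapshot dir
    }
    return new Path(sourceDir, ".snapshot/" + snapshotName);
  }

  public static void main(String[] args) {
    // Prints /src/.snapshot/s2: files are copied from the frozen snapshot,
    // so the copy cannot race with ongoing writes under /src.
    System.out.println(sourceSnapshotPath(new Path("/src"), "s2"));
  }
}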

http://git-wip-us.apache.org/repos/asf/hadoop/blob/61487d36/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
index 9ec57f4..2b1e510 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/mapred/CopyCommitter.java
@@ -90,8 +90,7 @@ public class CopyCommitter extends 

[48/50] [abbrv] hadoop git commit: TestFairScheduler.testContinuousScheduling fails Intermittently. Contributed by Zhihai Xu.

2015-04-06 Thread zjshen
TestFairScheduler.testContinuousScheduling fails Intermittently. Contributed by 
Zhihai Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37725e71
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37725e71
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37725e71

Branch: refs/heads/YARN-2928
Commit: 37725e71643f9dd6f68796736cc9c407ea437cb3
Parents: 0b09b40
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon Apr 6 20:13:21 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:17 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../resourcemanager/scheduler/fair/TestFairScheduler.java | 7 ++-
 2 files changed, 5 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37725e71/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 0c5aac8..5f20ca3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -201,6 +201,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3435. AM container to be allocated Appattempt AM container shown as 
null.
 (Bibin A Chundatt via xgong)
 
+YARN-2666. TestFairScheduler.testContinuousScheduling fails Intermittently.
+(Zhihai Xu via ozawa)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37725e71/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
index b5bfb8c..98877e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java
@@ -3922,12 +3922,9 @@ public class TestFairScheduler extends 
FairSchedulerTestBase {
 createResourceRequest(1024, 1, ResourceRequest.ANY, 2, 1, true);
 ask.clear();
 ask.add(request);
+scheduler.stop();
scheduler.allocate(appAttemptId, ask, new ArrayList<ContainerId>(), null,
null);
-
-// Wait until app gets resources
-while (app.getCurrentConsumption()
-.equals(Resources.createResource(1024, 1))) { }
-
+scheduler.continuousSchedulingAttempt();
 Assert.assertEquals(2048, app.getCurrentConsumption().getMemory());
 Assert.assertEquals(2, app.getCurrentConsumption().getVirtualCores());
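
The fix above swaps a busy-wait on the asynchronous continuous-scheduling thread for a stopped scheduler plus one explicit scheduling attempt, which makes the assertion deterministic. A toy, self-contained version of the pattern (all names hypothetical):

import java.util.concurrent.atomic.AtomicInteger;

public class DeterministicSchedulingTest {
  static class ToyScheduler {
    final AtomicInteger allocatedMb = new AtomicInteger();
    volatile boolean running = true;

    void stop() { running = false; }                       // halt background loop
    void schedulingPass() { allocatedMb.addAndGet(1024); } // one explicit pass
  }

  public static void main(String[] args) {
    ToyScheduler s = new ToyScheduler();
    // Flaky style: while (s.allocatedMb.get() < 2048) { } spins against a
    // racing background thread. Deterministic style: stop the thread and
    // drive the passes by hand, then assert immediately.
    s.stop();
    s.schedulingPass();
    s.schedulingPass();
    System.out.println("allocated=" + s.allocatedMb.get() + " (expect 2048)");
  }
}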
 



[46/50] [abbrv] hadoop git commit: HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. Contributed by Li Lu.

2015-04-06 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1069dcee/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.6.0.xml
--
diff --git 
a/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.6.0.xml
 
b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.6.0.xml
new file mode 100644
index 000..b105253
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/dev-support/jdiff/Apache_Hadoop_Common_2.6.0.xml
@@ -0,0 +1,45596 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Mon Mar 30 15:27:52 PDT 2015 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-core 2.6.0"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet 
org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet 
-docletpath 
/Users/llu/hadoop-common/hadoop-common-project/hadoop-common/target/hadoop-annotations.jar:/Users/llu/hadoop-common/hadoop-common-project/hadoop-common/target/jdiff.jar
 -verbose -classpath 
/Users/llu/hadoop-common/hadoop-common-project/hadoop-common/target/classes:/Users/llu/hadoop-common/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.6.0.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_67.jdk/Contents/Home/lib/tools.jar:/Users/llu/.m2/repository/com/google/guava/guava/11.0.2/guava-11.0.2.jar:/Users/llu/.m2/repository/commons-cli/commons-cli/1.2/commons-cli-1.2.jar:/Users/llu/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/llu/.m2/repository/xmlenc/xmlenc/0.52/xmlenc-0.52.jar:/Users/llu/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/llu/.m2/repository/common
 
s-codec/commons-codec/1.4/commons-codec-1.4.jar:/Users/llu/.m2/repository/commons-io/commons-io/2.4/commons-io-2.4.jar:/Users/llu/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/llu/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/llu/.m2/repository/javax/servlet/servlet-api/2.5/servlet-api-2.5.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty/6.1.26/jetty-6.1.26.jar:/Users/llu/.m2/repository/org/mortbay/jetty/jetty-util/6.1.26/jetty-util-6.1.26.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-core/1.9/jersey-core-1.9.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-json/1.9/jersey-json-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/llu/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/llu/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/llu/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/
 
Users/llu/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/llu/.m2/repository/com/sun/jersey/jersey-server/1.9/jersey-server-1.9.jar:/Users/llu/.m2/repository/asm/asm/3.2/asm-3.2.jar:/Users/llu/.m2/repository/commons-logging/commons-logging/1.1.3/commons-logging-1.1.3.jar:/Users/llu/.m2/repository/log4j/log4j/1.2.17/log4j-1.2.17.jar:/Users/llu/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/llu/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/llu/.m2/repository/commons-lang/commons-lang/2.6/commons-lang-2.6.jar:/Users/llu/.m2/repository/commons-conf
 
iguration/commons-configuration/1.6/commons-configuration-1.6.jar:/Users/llu/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-api/1.7.5/slf4j-api-1.7.5.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-core-asl/1.9.13/jackson-core-asl-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-mapper-asl/1.9.13/jackson-mapper-asl-1.9.13.jar:/Users/llu/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/llu/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/llu/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/llu/.m2/repository/org/apache/ant/ant/1.8.1/ant-1.8.1.jar:/Users/llu/.m2/repository/org/apache/ant/ant-launcher/1.8.1/
 

[45/50] [abbrv] hadoop git commit: HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. Contributed by Li Lu.

2015-04-06 Thread zjshen
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1069dcee/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.6.0.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.6.0.xml
 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.6.0.xml
new file mode 100644
index 000..5514700
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Apache_Hadoop_HDFS_2.6.0.xml
@@ -0,0 +1,19520 @@
+<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
+<!-- Generated by the JDiff Javadoc doclet -->
+<!-- (http://www.jdiff.org) -->
+<!-- on Mon Mar 30 15:30:43 PDT 2015 -->
+
+<api
+  xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
+  xsi:noNamespaceSchemaLocation='api.xsd'
+  name="hadoop-hdfs 2.6.0"
+  jdversion="1.0.9">
+
+<!--  Command line arguments =  -doclet 
org.apache.hadoop.classification.tools.ExcludePrivateAnnotationsJDiffDoclet 
-docletpath 
/Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/target/hadoop-annotations.jar:/Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/target/jdiff.jar
 -verbose -classpath 
/Users/llu/hadoop-common/hadoop-hdfs-project/hadoop-hdfs/target/classes:/Users/llu/hadoop-common/hadoop-common-project/hadoop-annotations/target/hadoop-annotations-2.6.0.jar:/Library/Java/JavaVirtualMachines/jdk1.7.0_67.jdk/Contents/Home/lib/tools.jar:/Users/llu/hadoop-common/hadoop-common-project/hadoop-auth/target/hadoop-auth-2.6.0.jar:/Users/llu/.m2/repository/org/slf4j/slf4j-api/1.7.5/slf4j-api-1.7.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpclient/4.2.5/httpclient-4.2.5.jar:/Users/llu/.m2/repository/org/apache/httpcomponents/httpcore/4.2.5/httpcore-4.2.5.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-kerberos-codec/2.0.0-M15/apacheds-ker
 
beros-codec-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/server/apacheds-i18n/2.0.0-M15/apacheds-i18n-2.0.0-M15.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-asn1-api/1.0.0-M20/api-asn1-api-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/directory/api/api-util/1.0.0-M20/api-util-1.0.0-M20.jar:/Users/llu/.m2/repository/org/apache/zookeeper/zookeeper/3.4.6/zookeeper-3.4.6.jar:/Users/llu/.m2/repository/org/apache/curator/curator-framework/2.6.0/curator-framework-2.6.0.jar:/Users/llu/hadoop-common/hadoop-common-project/hadoop-common/target/hadoop-common-2.6.0.jar:/Users/llu/.m2/repository/org/apache/commons/commons-math3/3.1.1/commons-math3-3.1.1.jar:/Users/llu/.m2/repository/commons-httpclient/commons-httpclient/3.1/commons-httpclient-3.1.jar:/Users/llu/.m2/repository/commons-net/commons-net/3.1/commons-net-3.1.jar:/Users/llu/.m2/repository/commons-collections/commons-collections/3.2.1/commons-collections-3.2.1.jar:/Users/llu/.m2/repository/com/sun/jerse
 
y/jersey-json/1.9/jersey-json-1.9.jar:/Users/llu/.m2/repository/org/codehaus/jettison/jettison/1.1/jettison-1.1.jar:/Users/llu/.m2/repository/com/sun/xml/bind/jaxb-impl/2.2.3-1/jaxb-impl-2.2.3-1.jar:/Users/llu/.m2/repository/javax/xml/bind/jaxb-api/2.2.2/jaxb-api-2.2.2.jar:/Users/llu/.m2/repository/javax/xml/stream/stax-api/1.0-2/stax-api-1.0-2.jar:/Users/llu/.m2/repository/javax/activation/activation/1.1/activation-1.1.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-jaxrs/1.9.13/jackson-jaxrs-1.9.13.jar:/Users/llu/.m2/repository/org/codehaus/jackson/jackson-xc/1.9.13/jackson-xc-1.9.13.jar:/Users/llu/.m2/repository/tomcat/jasper-compiler/5.5.23/jasper-compiler-5.5.23.jar:/Users/llu/.m2/repository/commons-el/commons-el/1.0/commons-el-1.0.jar:/Users/llu/.m2/repository/net/java/dev/jets3t/jets3t/0.9.0/jets3t-0.9.0.jar:/Users/llu/.m2/repository/com/jamesmurty/utils/java-xmlbuilder/0.4/java-xmlbuilder-0.4.jar:/Users/llu/.m2/repository/commons-configuration/commons-configuratio
 
n/1.6/commons-configuration-1.6.jar:/Users/llu/.m2/repository/commons-digester/commons-digester/1.8/commons-digester-1.8.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils/1.7.0/commons-beanutils-1.7.0.jar:/Users/llu/.m2/repository/commons-beanutils/commons-beanutils-core/1.8.0/commons-beanutils-core-1.8.0.jar:/Users/llu/.m2/repository/org/apache/avro/avro/1.7.4/avro-1.7.4.jar:/Users/llu/.m2/repository/com/thoughtworks/paranamer/paranamer/2.3/paranamer-2.3.jar:/Users/llu/.m2/repository/org/xerial/snappy/snappy-java/1.0.4.1/snappy-java-1.0.4.1.jar:/Users/llu/.m2/repository/com/google/code/gson/gson/2.2.4/gson-2.2.4.jar:/Users/llu/.m2/repository/com/jcraft/jsch/0.1.42/jsch-0.1.42.jar:/Users/llu/.m2/repository/org/apache/curator/curator-client/2.6.0/curator-client-2.6.0.jar:/Users/llu/.m2/repository/org/apache/curator/curator-recipes/2.6.0/curator-recipes-2.6.0.jar:/Users/llu/.m2/repository/com/google/code/findbugs/jsr305/1.3.9/jsr305-1.3.9.jar:/Users/llu/.m2/repository/
 

[31/50] [abbrv] hadoop git commit: YARN-3415. Non-AM containers can be counted towards amResourceUsage of a fairscheduler queue (Zhihai Xu via Sandy Ryza)

2015-04-06 Thread zjshen
YARN-3415. Non-AM containers can be counted towards amResourceUsage of a 
fairscheduler queue (Zhihai Xu via Sandy Ryza)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e75307b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e75307b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e75307b2

Branch: refs/heads/YARN-2928
Commit: e75307b282014f73b435c0d4f6e5399af4ba128c
Parents: 97092e4
Author: Sandy Ryza sa...@cloudera.com
Authored: Thu Apr 2 13:56:08 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 ++
 .../scheduler/fair/FSAppAttempt.java| 31 +++-
 .../scheduler/fair/FSLeafQueue.java |  5 +-
 .../scheduler/fair/FairScheduler.java   |  6 ---
 .../scheduler/fair/TestFairScheduler.java   | 50 
 5 files changed, 64 insertions(+), 31 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e75307b2/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bc8d06a..18a3c37 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -188,6 +188,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3425. NPE from RMNodeLabelsManager.serviceStop when 
 NodeLabelsManager.serviceInit failed. (Bibin A Chundatt via wangda)
 
+YARN-3415. Non-AM containers can be counted towards amResourceUsage of a
+Fair Scheduler queue (Zhihai Xu via Sandy Ryza)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e75307b2/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
index 46617ff..f0d1ed1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java
@@ -523,8 +523,11 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
   // Inform the node
   node.allocateContainer(allocatedContainer);
 
-  // If this container is used to run AM, update the leaf queue's AM usage
-  if (getLiveContainers().size() == 1 && !getUnmanagedAM()) {
+  // If not running unmanaged, the first container we allocate is always
+  // the AM. Set the amResource for this app and update the leaf queue's AM
+  // usage
+  if (!isAmRunning() && !getUnmanagedAM()) {
+setAMResource(container.getResource());
 getQueue().addAMResourceUsage(container.getResource());
 setAmRunning(true);
   }
@@ -551,6 +554,19 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
  LOG.debug("Node offered to app: " + getName() + " reserved: " +
reserved);
 }
 
+// Check the AM resource usage for the leaf queue
+if (!isAmRunning() && !getUnmanagedAM()) {
+  List<ResourceRequest> ask = appSchedulingInfo.getAllResourceRequests();
+  if (ask.isEmpty() || !getQueue().canRunAppAM(
+  ask.get(0).getCapability())) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Skipping allocation because maxAMShare limit would " +
+  "be exceeded");
+}
+return Resources.none();
+  }
+}
+
 Collection<Priority> prioritiesToTry = (reserved) ?
 Arrays.asList(node.getReservedContainer().getReservedPriority()) :
 getPriorities();
@@ -567,17 +583,6 @@ public class FSAppAttempt extends 
SchedulerApplicationAttempt
 
 addSchedulingOpportunity(priority);
 
-// Check the AM resource usage for the leaf queue
-if (getLiveContainers().size() == 0 && !getUnmanagedAM()) {
-  if (!getQueue().canRunAppAM(getAMResource())) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Skipping allocation because maxAMShare limit would " +
-  "be exceeded");
-}
-return Resources.none();
-  }
-}
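
The net effect of the two hunks: the maxAMShare check now runs before any container is handed out and uses the capability of the first pending request, so a non-AM container can no longer be mistaken for the AM and counted against the queue's AM usage. A simplified, self-contained model of the reordered check (names and numbers hypothetical):

public class AmShareSketch {
  static class Queue {
    int amCapacityMb = 2048, amUsedMb = 0;
    boolean canRunAppAM(int requestedMb) {       // the maxAMShare test
      return amUsedMb + requestedMb <= amCapacityMb;
    }
  }

  static class AppAttempt {
    boolean amRunning = false;

    int tryAssign(Queue q, int requestedMb) {
      // Check BEFORE allocating, using the pending request's capability.
      if (!amRunning && !q.canRunAppAM(requestedMb)) {
        return 0;                                // skip: share would be exceeded
      }
      if (!amRunning) {                          // first allocation is the AM
        q.amUsedMb += requestedMb;
        amRunning = true;
      }
      return requestedMb;                        // allocated
    }
  }

  public static void main(String[] args) {
    Queue q = new Queue();
    AppAttempt a = new AppAttempt();
    System.out.println(a.tryAssign(q, 4096));    // 0: AM would exceed the share
    System.out.println(a.tryAssign(q, 1024));    // 1024: AM fits, usage recorded
  }
}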

[07/50] [abbrv] hadoop git commit: YARN-3425. NPE from RMNodeLabelsManager.serviceStop when NodeLabelsManager.serviceInit failed. (Bibin A Chundatt via wangda)

2015-04-06 Thread zjshen
YARN-3425. NPE from RMNodeLabelsManager.serviceStop when 
NodeLabelsManager.serviceInit failed. (Bibin A Chundatt via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a920ab07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a920ab07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a920ab07

Branch: refs/heads/YARN-2928
Commit: a920ab0779e9de764ba80d4d2c53b7cb6f6d6d36
Parents: f95a2de
Author: Wangda Tan wan...@apache.org
Authored: Wed Apr 1 10:14:48 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:10 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java   | 4 +++-
 2 files changed, 6 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a920ab07/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c48de2c..6fbc26e 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -185,6 +185,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3412. RM tests should use MockRM where possible. (kasha)
 
+YARN-3425. NPE from RMNodeLabelsManager.serviceStop when 
+NodeLabelsManager.serviceInit failed. (Bibin A Chundatt via wangda)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a920ab07/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
index a5e2756..fe38164 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/nodelabels/CommonNodeLabelsManager.java
@@ -258,7 +258,9 @@ public class CommonNodeLabelsManager extends 
AbstractService {
   // for UT purpose
   protected void stopDispatcher() {
 AsyncDispatcher asyncDispatcher = (AsyncDispatcher) dispatcher;
-asyncDispatcher.stop();
+if (null != asyncDispatcher) {
+  asyncDispatcher.stop();
+}
   }
   
   @Override


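The null guard above addresses a general lifecycle hazard: when serviceInit fails partway through, the framework still calls stop, so stop must tolerate fields that were never initialized. A minimal sketch of that contract (hypothetical names, not the actual YARN service API):

public class ToyService {
  private Thread dispatcher;               // remains null if init fails early

  void init(boolean failEarly) {
    if (failEarly) throw new IllegalStateException("init failed");
    dispatcher = new Thread();
  }

  void stop() {
    if (dispatcher != null) {              // the guard added by the patch
      dispatcher.interrupt();
    }
  }

  public static void main(String[] args) {
    ToyService s = new ToyService();
    try { s.init(true); } catch (IllegalStateException e) { /* expected */ }
    s.stop();                              // safe: no NullPointerException
  }
}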

[13/50] [abbrv] hadoop git commit: HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..). Contributed by Walter Su.

2015-04-06 Thread zjshen
HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..). Contributed 
by Walter Su.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b9fbc73e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b9fbc73e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b9fbc73e

Branch: refs/heads/YARN-2928
Commit: b9fbc73e57f2d64cbf7109f232d9c670ca6d7df6
Parents: 188b7d0
Author: Andrew Wang w...@apache.org
Authored: Wed Apr 1 12:53:25 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:11 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../apache/hadoop/hdfs/BlockReaderFactory.java  | 24 +---
 .../java/org/apache/hadoop/hdfs/HAUtil.java | 12 ++
 .../hdfs/server/datanode/BPServiceActor.java|  8 ---
 .../datanode/fsdataset/impl/FsDatasetCache.java |  8 ---
 .../server/namenode/FileJournalManager.java | 22 +++---
 .../hadoop/hdfs/server/namenode/NameNode.java   |  4 +++-
 .../hdfs/shortcircuit/ShortCircuitCache.java|  4 +++-
 .../tools/offlineImageViewer/FSImageLoader.java |  6 +++--
 .../hadoop/hdfs/util/LightWeightHashSet.java|  6 +++--
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  8 +--
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  | 16 +
 12 files changed, 78 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cba53a3..435fdd7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -373,6 +373,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8009. Signal congestion on the DataNode. (wheat9)
 
+HDFS-7978. Add LOG.isDebugEnabled() guard for some LOG.debug(..).
+(Walter Su via wang)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b9fbc73e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
index 1e915b2..8f33899 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderFactory.java
@@ -369,9 +369,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   return null;
 }
 if (clientContext.getDisableLegacyBlockReaderLocal()) {
-  PerformanceAdvisory.LOG.debug(this + ": can't construct " +
-  "BlockReaderLocalLegacy because " +
-  "disableLegacyBlockReaderLocal is set.");
+PerformanceAdvisory.LOG.debug("{}: can't construct " +
+"BlockReaderLocalLegacy because " +
+"disableLegacyBlockReaderLocal is set.", this);
   return null;
 }
 IOException ioe = null;
@@ -410,8 +410,8 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   getPathInfo(inetSocketAddress, conf);
 }
 if (!pathInfo.getPathState().getUsableForShortCircuit()) {
-  PerformanceAdvisory.LOG.debug(this + ": " + pathInfo + " is not " +
-  "usable for short circuit; giving up on BlockReaderLocal.");
+  PerformanceAdvisory.LOG.debug("{}: {} is not usable for short circuit; " +
+  "giving up on BlockReaderLocal.", this, pathInfo);
   return null;
 }
 ShortCircuitCache cache = clientContext.getShortCircuitCache();
@@ -426,11 +426,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   throw exc;
 }
 if (info.getReplica() == null) {
-  if (LOG.isTraceEnabled()) {
-PerformanceAdvisory.LOG.debug(this + ": failed to get " +
-"ShortCircuitReplica. Cannot construct " +
-"BlockReaderLocal via " + pathInfo.getPath());
-  }
+  PerformanceAdvisory.LOG.debug("{}: failed to get " +
+  "ShortCircuitReplica. Cannot construct " +
+  "BlockReaderLocal via {}", this, pathInfo.getPath());
   return null;
 }
 return new BlockReaderLocal.Builder(conf).
@@ -610,9 +608,9 @@ public class BlockReaderFactory implements 
ShortCircuitReplicaCreator {
   getPathInfo(inetSocketAddress, conf);
 }
 if (!pathInfo.getPathState().getUsableForDataTransfer()) {
-  PerformanceAdvisory.LOG.debug(this + : not trying to create a  +
-  remote 
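
The recurring pattern in this patch is the move from guarded string concatenation to parameterized messages. With {} placeholders the message is only formatted when the level is enabled, so the explicit isDebugEnabled() guard becomes unnecessary for cheap arguments (PerformanceAdvisory.LOG is an SLF4J logger). A minimal sketch, assuming slf4j-api on the classpath:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ParamLoggingSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ParamLoggingSketch.class);

  public static void main(String[] args) {
    Object pathInfo = "/tmp/socket";
    // Before: the concatenation runs even when DEBUG is off, hence the guard.
    if (LOG.isDebugEnabled()) {
      LOG.debug("path " + pathInfo + " is not usable");
    }
    // After: the message is built only if DEBUG is enabled; no guard needed.
    LOG.debug("path {} is not usable", pathInfo);
  }
}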

[23/50] [abbrv] hadoop git commit: HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop (Contributed by Vinayakumar B)

2015-04-06 Thread zjshen
HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes 
for infinite loop (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4c3ce1a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4c3ce1a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4c3ce1a

Branch: refs/heads/YARN-2928
Commit: b4c3ce1ac62e734cc0f725ffda1e1319d2a05aca
Parents: 0e6b61e
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Apr 2 08:12:00 2015 +0530
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:13 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop/hdfs/server/datanode/ReportBadBlockAction.java| 8 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4c3ce1a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 518df9f..80d958d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -412,6 +412,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7922. ShortCircuitCache#close is not releasing
 ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
 
+HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
+goes for infinite loop (vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4c3ce1a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
index fd01a01..991b56d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.ipc.StandbyException;
 
 /**
  * ReportBadBlockAction is an instruction issued by {{BPOfferService}} to
@@ -58,8 +59,11 @@ public class ReportBadBlockAction implements 
BPServiceActorAction {
 dnArr, uuids, types) };
 
 try {
-  bpNamenode.reportBadBlocks(locatedBlock);  
-} catch (IOException e){
+  bpNamenode.reportBadBlocks(locatedBlock);
+} catch (StandbyException e) {
+  DataNode.LOG.warn("Failed to report bad block " + block
+  + " to standby namenode");
+} catch (IOException e) {
   throw new BPServiceActorActionException("Failed to report bad block "
   + block + " to namenode: ");
 }
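
The essence of the fix is failure classification: a StandbyException is terminal for this target (the standby will keep rejecting the report), so it is logged and dropped, while other IOExceptions stay retriable and propagate so the actor can re-queue. A simplified, self-contained sketch (types simplified, not the actual DataNode classes):

import java.io.IOException;

public class ReportRetrySketch {
  static class StandbyException extends IOException {}

  static void report(boolean targetIsStandby) throws IOException {
    try {
      send(targetIsStandby);
    } catch (StandbyException e) {
      System.err.println("standby namenode, dropping report"); // do not retry
    }
    // any other IOException propagates: the caller re-queues and retries
  }

  static void send(boolean standby) throws IOException {
    if (standby) throw new StandbyException();
  }

  public static void main(String[] args) throws IOException {
    report(true);   // swallowed with a warning instead of looping forever
  }
}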



[29/50] [abbrv] hadoop git commit: YARN-3365. Enhanced NodeManager to support using the 'tc' tool via container-executor for outbound network traffic control. Contributed by Sidharta Seethana.

2015-04-06 Thread zjshen
YARN-3365. Enhanced NodeManager to support using the 'tc' tool via 
container-executor for outbound network traffic control. Contributed by 
Sidharta Seethana.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f66d408
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f66d408
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f66d408

Branch: refs/heads/YARN-2928
Commit: 4f66d40802a81de710ad8b1909abee2e20e1007d
Parents: 503e490
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Thu Apr 2 16:53:59 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:14 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   4 +
 .../impl/container-executor.c   | 100 +++-
 .../impl/container-executor.h   |  34 +-
 .../main/native/container-executor/impl/main.c  | 465 +--
 .../nodemanager/TestLinuxContainerExecutor.java | 421 +++--
 5 files changed, 722 insertions(+), 302 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f66d408/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 18a3c37..bcd2286 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -112,6 +112,10 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3345. Add non-exclusive node label API. (Wangda Tan via jianhe)
 
+YARN-3365. Enhanced NodeManager to support using the 'tc' tool via
+container-executor for outbound network traffic control. (Sidharta Seethana
+via vinodkv)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f66d408/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index edfd25f..485399a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -40,6 +40,12 @@ static const int DEFAULT_MIN_USERID = 1000;
 
static const char* DEFAULT_BANNED_USERS[] = {"yarn", "mapred", "hdfs", "bin",
0};
 
+//location of traffic control binary
+static const char* TC_BIN = "/sbin/tc";
+static const char* TC_MODIFY_STATE_OPTS [] = { "-b" , NULL};
+static const char* TC_READ_STATE_OPTS [] = { "-b", NULL};
+static const char* TC_READ_STATS_OPTS [] = { "-s",  "-b", NULL};
+
 //struct to store the user details
 struct passwd *user_detail = NULL;
 
@@ -291,27 +297,20 @@ static int write_exit_code_file(const char* 
exit_code_file, int exit_code) {
   return 0;
 }
 
-/**
- * Wait for the container process to exit and write the exit code to
- * the exit code file.
- * Returns the exit code of the container process.
- */
-static int wait_and_write_exit_code(pid_t pid, const char* exit_code_file) {
+static int wait_and_get_exit_code(pid_t pid) {
   int child_status = -1;
   int exit_code = -1;
   int waitpid_result;
 
-  if (change_effective_user(nm_uid, nm_gid) != 0) {
-return -1;
-  }
   do {
-waitpid_result = waitpid(pid, &child_status, 0);
+  waitpid_result = waitpid(pid, &child_status, 0);
  } while (waitpid_result == -1 && errno == EINTR);
+
  if (waitpid_result < 0) {
-fprintf(LOGFILE, "Error waiting for container process %d - %s\n",
-pid, strerror(errno));
+fprintf(LOGFILE, "error waiting for process %d - %s\n", pid,
strerror(errno));
 return -1;
   }
+
   if (WIFEXITED(child_status)) {
 exit_code = WEXITSTATUS(child_status);
   } else if (WIFSIGNALED(child_status)) {
@@ -319,9 +318,26 @@ static int wait_and_write_exit_code(pid_t pid, const char* 
exit_code_file) {
   } else {
 fprintf(LOGFILE, "Unable to determine exit status for pid %d\n", pid);
   }
+
+  return exit_code;
+}
+
+/**
+ * Wait for the container process to exit and write the exit code to
+ * the exit code file.
+ * Returns the exit code of the container process.
+ */
+static int wait_and_write_exit_code(pid_t pid, const char* exit_code_file) {
+  int exit_code = -1;
+
+  if (change_effective_user(nm_uid, nm_gid) != 0) {
+return -1;
+  }
+  exit_code = wait_and_get_exit_code(pid);
   if 

[18/50] [abbrv] hadoop git commit: HDFS-7922. ShortCircuitCache#close is not releasing ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)

2015-04-06 Thread zjshen
HDFS-7922. ShortCircuitCache#close is not releasing 
ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/093e0925
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/093e0925
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/093e0925

Branch: refs/heads/YARN-2928
Commit: 093e0925a4459bb07e4093a30455016a815734d5
Parents: f4afce0
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Apr 1 16:02:39 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:12 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../hdfs/shortcircuit/ShortCircuitCache.java| 28 
 .../shortcircuit/TestShortCircuitCache.java |  2 +-
 3 files changed, 32 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/093e0925/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b5591e0..f265ead 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -409,6 +409,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-6945. BlockManager should remove a block from excessReplicateMap and
 decrement ExcessBlocks metric when the block is removed. (aajisaka)
 
+HDFS-7922. ShortCircuitCache#close is not releasing
+ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/093e0925/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
index 73c52d5..d1ec3b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/shortcircuit/ShortCircuitCache.java
@@ -916,6 +916,34 @@ public class ShortCircuitCache implements Closeable {
 } finally {
   lock.unlock();
 }
+
+releaserExecutor.shutdown();
+cleanerExecutor.shutdown();
+// wait for existing tasks to terminate
+try {
+  if (!releaserExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
+LOG.error("Forcing SlotReleaserThreadPool to shutdown!");
+releaserExecutor.shutdownNow();
+  }
+} catch (InterruptedException e) {
+  releaserExecutor.shutdownNow();
+  Thread.currentThread().interrupt();
+  LOG.error("Interrupted while waiting for SlotReleaserThreadPool "
+  + "to terminate", e);
+}
+
+// wait for existing tasks to terminate
+try {
+  if (!cleanerExecutor.awaitTermination(30, TimeUnit.SECONDS)) {
+LOG.error("Forcing CleanerThreadPool to shutdown!");
+cleanerExecutor.shutdownNow();
+  }
+} catch (InterruptedException e) {
+  cleanerExecutor.shutdownNow();
+  Thread.currentThread().interrupt();
+  LOG.error("Interrupted while waiting for CleanerThreadPool "
+  + "to terminate", e);
+}
 IOUtils.cleanup(LOG, shmManager);
   }
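
The sequence added above is the standard ExecutorService shutdown idiom: shutdown() to stop accepting tasks, a bounded awaitTermination(), and shutdownNow() as the fallback, restoring the thread's interrupt flag if the wait itself is interrupted. A compact, reusable sketch:

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

public class ShutdownSketch {
  static void shutdownAndAwait(ExecutorService pool) {
    pool.shutdown();                          // stop accepting new tasks
    try {
      if (!pool.awaitTermination(30, TimeUnit.SECONDS)) {
        pool.shutdownNow();                   // force-cancel lingering tasks
      }
    } catch (InterruptedException e) {
      pool.shutdownNow();                     // cancel, then re-assert interrupt
      Thread.currentThread().interrupt();
    }
  }

  public static void main(String[] args) {
    shutdownAndAwait(Executors.newScheduledThreadPool(1));
  }
}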
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/093e0925/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
index 7daabd0..7d26dee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/shortcircuit/TestShortCircuitCache.java
@@ -203,7 +203,7 @@ public class TestShortCircuitCache {
 cache.close();
   }
 
-  @Test(timeout=6)
+  @Test(timeout=10)
   public void testExpiry() throws Exception {
 final ShortCircuitCache cache =
 new ShortCircuitCache(2, 1, 1, 1000, 1, 1000, 0);



[47/50] [abbrv] hadoop git commit: HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. Contributed by Li Lu.

2015-04-06 Thread zjshen
HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. Contributed by Li Lu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1069dcee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1069dcee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1069dcee

Branch: refs/heads/YARN-2928
Commit: 1069dceea24fe019e77619a545909e52430b4275
Parents: 3485a8d
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Sat Apr 4 13:52:01 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:17 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +
 .../jdiff/Apache_Hadoop_Common_2.6.0.xml| 45596 +
 .../jdiff/Apache_Hadoop_HDFS_2.6.0.xml  | 19520 +++
 hadoop-project-dist/pom.xml |24 +-
 hadoop-project/pom.xml  | 2 +
 5 files changed, 65137 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1069dcee/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index fd926aa..f52e09f 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1205,6 +1205,8 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11377. Added Null.java without which jdiff completely flops. 
(Tsuyoshi
 Ozawa via vinodkv)
+
+HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. (Li Lu via 
vinodkv)
 
 Release 2.6.1 - UNRELEASED
 



[17/50] [abbrv] hadoop git commit: HDFS-8008. Support client-side back off when the datanodes are congested. Contributed by Haohui Mai.

2015-04-06 Thread zjshen
HDFS-8008. Support client-side back off when the datanodes are congested. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f6d4143f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f6d4143f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f6d4143f

Branch: refs/heads/YARN-2928
Commit: f6d4143f32845a4230919c998269f59b9f3be11b
Parents: 61487d3
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 16:54:46 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:12 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../org/apache/hadoop/hdfs/DataStreamer.java| 63 
 .../hdfs/protocol/datatransfer/PipelineAck.java |  4 ++
 .../apache/hadoop/hdfs/TestDFSOutputStream.java | 42 +
 4 files changed, 112 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6d4143f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1d9e200..34c0556 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -868,6 +868,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7742. Favoring decommissioning node for replication can cause a block 
 to stay underreplicated for long periods (Nathan Roberts via kihwal)
 
+HDFS-8008. Support client-side back off when the datanodes are congested.
+(wheat9)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f6d4143f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
index 9c437ba..6ff4c24 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
@@ -218,6 +218,13 @@ class DataStreamer extends Daemon {
   private boolean failPacket = false;
   private final long dfsclientSlowLogThresholdMs;
   private long artificialSlowdown = 0;
+  // List of congested data nodes. The stream will back off if the DataNodes
+  // are congested
+  private final ArrayList<DatanodeInfo> congestedNodes = new ArrayList<>();
+  private static final int CONGESTION_BACKOFF_MEAN_TIME_IN_MS = 5000;
+  private static final int CONGESTION_BACK_OFF_MAX_TIME_IN_MS =
+  CONGESTION_BACKOFF_MEAN_TIME_IN_MS * 10;
+  private int lastCongestionBackoffTime;
 
   private final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes;
 
@@ -386,6 +393,11 @@ class DataStreamer extends Daemon {
 one = createHeartbeatPacket();
 assert one != null;
   } else {
+try {
+  backOffIfNecessary();
+} catch (InterruptedException e) {
+  DFSClient.LOG.warn("Caught exception ", e);
+}
 one = dataQueue.getFirst(); // regular data packet
 long parents[] = one.getTraceParents();
 if (parents.length > 0) {
@@ -815,9 +827,14 @@ class DataStreamer extends Daemon {
 
   long seqno = ack.getSeqno();
   // processes response status from datanodes.
+  ArrayList<DatanodeInfo> congestedNodesFromAck = new ArrayList<>();
   for (int i = ack.getNumOfReplies()-1; i >= 0 &&
 dfsClient.clientRunning; i--) {
 final Status reply = PipelineAck.getStatusFromHeader(ack
 .getHeaderFlag(i));
+if (PipelineAck.getECNFromHeader(ack.getHeaderFlag(i)) ==
+PipelineAck.ECN.CONGESTED) {
+  congestedNodesFromAck.add(targets[i]);
+}
 // Restart will not be treated differently unless it is
 // the local node or the only one in the pipeline.
 if (PipelineAck.isRestartOOBStatus(reply) &&
@@ -839,6 +856,18 @@ class DataStreamer extends Daemon {
 }
   }
 
+  if (!congestedNodesFromAck.isEmpty()) {
+synchronized (congestedNodes) {
+  congestedNodes.clear();
+  congestedNodes.addAll(congestedNodesFromAck);
+}
+  } else {
+synchronized (congestedNodes) {
+  congestedNodes.clear();
+  lastCongestionBackoffTime = 0;
+}
+  }
+
   assert seqno != PipelineAck.UNKOWN_SEQNO :
   Ack for unknown seqno 
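
The excerpt above declares the mean (5 s) and cap (10x the mean) but does not show backOffIfNecessary itself, so the following is only a plausible sketch of the documented behavior: a randomized delay around the mean that grows while congestion persists and saturates at the cap. The growth factor is an assumption.

import java.util.concurrent.ThreadLocalRandom;

public class CongestionBackoffSketch {
  static final int MEAN_MS = 5000;          // CONGESTION_BACKOFF_MEAN_TIME_IN_MS
  static final int MAX_MS = MEAN_MS * 10;   // CONGESTION_BACK_OFF_MAX_TIME_IN_MS

  // ASSUMPTION: 1.5x growth plus uniform jitter; the real formula is not
  // shown in this excerpt.
  static int nextBackoff(int lastBackoffMs) {
    int jitter = ThreadLocalRandom.current().nextInt(MEAN_MS);
    long next = lastBackoffMs * 3L / 2 + jitter;
    return (int) Math.min(MAX_MS, Math.max(next, jitter));
  }

  public static void main(String[] args) throws InterruptedException {
    int backoff = 0;
    for (int round = 0; round < 3; round++) { // while DataNodes report ECN
      backoff = nextBackoff(backoff);
      System.out.println("sleeping " + backoff + " ms");
      Thread.sleep(backoff);
    }
  }
}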

[01/50] [abbrv] hadoop git commit: YARN-3304. Addendum patch. Cleaning up ResourceCalculatorProcessTree APIs for public use and removing inconsistencies in the default values. (Junping Du and Karthik

2015-04-06 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 ae0a11167 -> 8898d14c0


YARN-3304. Addendum patch. Cleaning up ResourceCalculatorProcessTree APIs for 
public use and removing inconsistencies in the default values. (Junping Du and 
Karthik Kambatla via vinodkv)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0834fe37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0834fe37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0834fe37

Branch: refs/heads/YARN-2928
Commit: 0834fe372ce55a4fd2b8566cbae95b7b908be71d
Parents: 71b4374
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 31 17:27:46 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:09 2015 -0700

--
 .../yarn/util/ProcfsBasedProcessTree.java   | 82 +++--
 .../util/ResourceCalculatorProcessTree.java | 55 ++-
 .../yarn/util/WindowsBasedProcessTree.java  | 24 -
 .../yarn/util/TestProcfsBasedProcessTree.java   | 96 ++--
 .../util/TestResourceCalculatorProcessTree.java | 10 ++
 .../yarn/util/TestWindowsBasedProcessTree.java  | 15 ++-
 6 files changed, 236 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0834fe37/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
index 9996a79..df9d28a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ProcfsBasedProcessTree.java
@@ -344,15 +344,23 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   public long getVirtualMemorySize(int olderThanAge) {
 long total = UNAVAILABLE;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
+  if (p != null) {
 if (total == UNAVAILABLE ) {
   total = 0;
 }
-total += p.getVmem();
+if (p.getAge() > olderThanAge) {
+  total += p.getVmem();
+}
   }
 }
 return total;
   }
+  
+  @Override
+  @SuppressWarnings("deprecation")
+  public long getCumulativeVmem(int olderThanAge) {
+return getVirtualMemorySize(olderThanAge);
+  }
 
   @Override
   public long getRssMemorySize(int olderThanAge) {
@@ -365,13 +373,21 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
 boolean isAvailable = false;
 long totalPages = 0;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
-totalPages += p.getRssmemPage();
+  if ((p != null) ) {
+if (p.getAge() > olderThanAge) {
+  totalPages += p.getRssmemPage();
+}
 isAvailable = true;
   }
 }
 return isAvailable ? totalPages * PAGE_SIZE : UNAVAILABLE; // convert # 
pages to byte
   }
+  
+  @Override
+  @SuppressWarnings("deprecation")
+  public long getCumulativeRssmem(int olderThanAge) {
+return getRssMemorySize(olderThanAge);
+  }
 
   /**
* Get the resident set size (RSS) memory used by all the processes
@@ -388,36 +404,42 @@ public class ProcfsBasedProcessTree extends 
ResourceCalculatorProcessTree {
   private long getSmapBasedRssMemorySize(int olderThanAge) {
 long total = UNAVAILABLE;
 for (ProcessInfo p : processTree.values()) {
-  if ((p != null) && (p.getAge() > olderThanAge)) {
-ProcessTreeSmapMemInfo procMemInfo = processSMAPTree.get(p.getPid());
-if (procMemInfo != null) {
-  for (ProcessSmapMemoryInfo info : procMemInfo.getMemoryInfoList()) {
-// Do not account for r--s or r-xs mappings
-if (info.getPermission().trim()
-  .equalsIgnoreCase(READ_ONLY_WITH_SHARED_PERMISSION)
-|| info.getPermission().trim()
-  .equalsIgnoreCase(READ_EXECUTE_WITH_SHARED_PERMISSION)) {
-  continue;
-}
-if (total == UNAVAILABLE){
-  total = 0;
-}
-total +=
-Math.min(info.sharedDirty, info.pss) + info.privateDirty
-+ info.privateClean;
-if (LOG.isDebugEnabled()) {
-  LOG.debug(" total(" + olderThanAge + "): PID : " + p.getPid()
-  + ", SharedDirty : " + info.sharedDirty + ", PSS : "
-   
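
The @Deprecated getCumulative* accessors above are a renaming shim: the old method names are kept as thin delegates to the renamed methods so existing callers keep compiling while new code migrates. The minimal form of the pattern:

public class ProcessTreeShimSketch {
  long getVirtualMemorySize(int olderThanAge) {
    return 42L;                                // stand-in for the real computation
  }

  /** @deprecated use {@link #getVirtualMemorySize(int)} instead. */
  @Deprecated
  long getCumulativeVmem(int olderThanAge) {
    return getVirtualMemorySize(olderThanAge); // pure forwarding, no logic
  }
}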

[11/50] [abbrv] hadoop git commit: YARN-3248. Display count of nodes blacklisted by apps in the web UI. Contributed by Varun Vasudev

2015-04-06 Thread zjshen
YARN-3248. Display count of nodes blacklisted by apps in the web UI.
Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40f362b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40f362b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40f362b1

Branch: refs/heads/YARN-2928
Commit: 40f362b1cbbae7d86d478e781d099e4de5b6ac92
Parents: db2998e
Author: Xuan xg...@apache.org
Authored: Wed Apr 1 04:19:18 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:10 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../yarn/server/webapp/AppAttemptBlock.java |  66 +
 .../hadoop/yarn/server/webapp/AppBlock.java |  37 +++--
 .../hadoop/yarn/server/webapp/AppsBlock.java|  85 ++-
 .../scheduler/AppSchedulingInfo.java|   4 +
 .../scheduler/SchedulerApplicationAttempt.java  |   4 +
 .../webapp/AppsBlockWithMetrics.java|   2 +-
 .../webapp/CapacitySchedulerPage.java   |   2 +-
 .../webapp/RMAppAttemptBlock.java   |  67 +
 .../resourcemanager/webapp/RMAppBlock.java  | 110 ++
 .../resourcemanager/webapp/RMAppsBlock.java | 146 +++
 .../resourcemanager/webapp/RMWebServices.java   |   3 +-
 .../webapp/dao/AppAttemptInfo.java  |  19 ++-
 .../webapp/TestRMWebServicesApps.java   |   2 +-
 14 files changed, 468 insertions(+), 82 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40f362b1/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f18cf30..461f07d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -920,6 +920,9 @@ Release 2.7.0 - UNRELEASED
 removing inconsistencies in the default values. (Junping Du and Karthik
 Kambatla via vinodkv)
 
+YARN-3248. Display count of nodes blacklisted by apps in the web UI.
+(Varun Vasudev via xgong)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/40f362b1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
index dca39d6..8df94e6 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/AppAttemptBlock.java
@@ -72,7 +72,7 @@ public class AppAttemptBlock extends HtmlBlock {
 }
 
 UserGroupInformation callerUGI = getCallerUGI();
-ApplicationAttemptReport appAttemptReport = null;
+ApplicationAttemptReport appAttemptReport;
 try {
   final GetApplicationAttemptReportRequest request =
   GetApplicationAttemptReportRequest.newInstance(appAttemptId);
@@ -135,34 +135,7 @@ public class AppAttemptBlock extends HtmlBlock {
 appAttempt.getRpcPort() < 65536) {
   node = appAttempt.getHost() + ":" + appAttempt.getRpcPort();
 }
-info("Application Attempt Overview")
-  ._(
-    "Application Attempt State:",
-    appAttempt.getAppAttemptState() == null ? UNAVAILABLE : appAttempt
-      .getAppAttemptState())
-  ._(
-    "AM Container:",
-    appAttempt.getAmContainerId() == null || containers == null
-        || !hasAMContainer(appAttemptReport.getAMContainerId(), containers)
-        ? null : root_url("container", appAttempt.getAmContainerId()),
-    String.valueOf(appAttempt.getAmContainerId()))
-  ._("Node:", node)
-  ._(
-    "Tracking URL:",
-    appAttempt.getTrackingUrl() == null
-        || appAttempt.getTrackingUrl() == UNAVAILABLE ? null
-        : root_url(appAttempt.getTrackingUrl()),
-    appAttempt.getTrackingUrl() == null
-        || appAttempt.getTrackingUrl() == UNAVAILABLE
-        ? "Unassigned"
-        : appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FINISHED
-            || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.FAILED
-            || appAttempt.getAppAttemptState() == YarnApplicationAttemptState.KILLED
-            ? 

[06/50] [abbrv] hadoop git commit: HDFS-7997. The first non-existing xattr should also throw IOException. (zhouyingchao via yliu)

2015-04-06 Thread zjshen
HDFS-7997. The first non-existing xattr should also throw IOException. 
(zhouyingchao via yliu)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6916c77b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6916c77b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6916c77b

Branch: refs/heads/YARN-2928
Commit: 6916c77b9dae46d7e2ce2d0a3a233f33f044a469
Parents: 49db780
Author: yliu y...@apache.org
Authored: Tue Mar 31 21:17:44 2015 +0800
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:09 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java | 3 ++-
 .../hadoop-hdfs/src/test/resources/testXAttrConf.xml | 4 ++--
 3 files changed, 7 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6916c77b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0bea916..e8075e6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -392,6 +392,9 @@ Release 2.8.0 - UNRELEASED
 DatanodeDescriptor#updateHeartbeatState() (Brahma Reddy Battula via Colin
 P. McCabe)
 
+HDFS-7997. The first non-existing xattr should also throw IOException.
+(zhouyingchao via yliu)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6916c77b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
index 45e63f2..d5c9124 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirXAttrOp.java
@@ -108,7 +108,8 @@ class FSDirXAttrOp {
   return filteredAll;
 }
 if (filteredAll == null || filteredAll.isEmpty()) {
-  return null;
+  throw new IOException(
+  "At least one of the attributes provided was not found.");
 }
 List<XAttr> toGet = Lists.newArrayListWithCapacity(xAttrs.size());
 for (XAttr xAttr : xAttrs) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6916c77b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
index 9c66cba..c2e836c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testXAttrConf.xml
@@ -420,8 +420,8 @@
   </cleanup-commands>
   <comparators>
<comparator>
-  <type>ExactComparator</type>
-  <expected-output># file: /file1#LF#</expected-output>
+  <type>SubstringComparator</type>
+  <expected-output>At least one of the attributes provided was not found</expected-output>
 </comparator>
   </comparators>
 </test>
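
For reference, a minimal sketch of the client-visible behavior after this
change (illustrative only; the path and xattr names are invented, and the
Configuration is assumed to point at an HDFS namenode):

    import java.io.IOException;
    import java.util.Arrays;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class XAttrLookupExample {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        Path file = new Path("/file1");                  // hypothetical file
        fs.setXAttr(file, "user.a1", new byte[]{0x31});
        try {
          // Requesting only non-existing attributes now fails fast with an
          // IOException instead of silently returning nothing.
          fs.getXAttrs(file, Arrays.asList("user.nonexistent"));
        } catch (IOException e) {
          // "At least one of the attributes provided was not found."
          System.out.println(e.getMessage());
        }
      }
    }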



[42/50] [abbrv] hadoop git commit: HDFS-8039. Fix TestDebugAdmin#testRecoverLease and testVerifyBlockChecksumCommand on Windows. Contributed by Xiaoyu Yao.

2015-04-06 Thread zjshen
HDFS-8039. Fix TestDebugAdmin#testRecoverLease and 
testVerifyBlockChecksumCommand on Windows. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4aff1e7b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4aff1e7b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4aff1e7b

Branch: refs/heads/YARN-2928
Commit: 4aff1e7bf7866f547163a09b750a14a70fa0602e
Parents: def2277
Author: cnauroth cnaur...@apache.org
Authored: Fri Apr 3 10:44:02 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:16 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../org/apache/hadoop/hdfs/tools/TestDebugAdmin.java | 15 ---
 2 files changed, 11 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aff1e7b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 366d8fb..ae56f1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1370,6 +1370,9 @@ Release 2.7.0 - UNRELEASED
 
TestDFSClientRetries#testDFSClientConfigurationLocateFollowingBlockInitialDelay
 for Windows. (Xiaoyu Yao via cnauroth)
 
+HDFS-8039. Fix TestDebugAdmin#testRecoverLease and
+testVerifyBlockChecksumCommand on Windows. (Xiaoyu Yao via cnauroth)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4aff1e7b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
index 44b6ba9..52b194d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestDebugAdmin.java
@@ -77,17 +77,18 @@ public class TestDebugAdmin {
   System.setOut(oldOut);
   IOUtils.closeStream(out);
 }
-return "ret: " + ret + ", " + bytes.toString();
+return "ret: " + ret + ", " +
+bytes.toString().replaceAll(System.getProperty("line.separator"), "");
   }
 
   @Test(timeout = 60000)
   public void testRecoverLease() throws Exception {
-assertEquals("ret: 1, You must supply a -path argument to recoverLease.\n",
+assertEquals("ret: 1, You must supply a -path argument to recoverLease.",
 runCmd(new String[]{"recoverLease", "-retries", "1"}));
 FSDataOutputStream out = fs.create(new Path("/foo"));
 out.write(123);
 out.close();
-assertEquals("ret: 0, recoverLease SUCCEEDED on /foo\n",
+assertEquals("ret: 0, recoverLease SUCCEEDED on /foo",
 runCmd(new String[]{"recoverLease", "-path", "/foo"}));
   }
 
@@ -98,18 +99,18 @@ public class TestDebugAdmin {
 ExtendedBlock block = DFSTestUtil.getFirstBlock(fs, new Path("/bar"));
 File blockFile = getBlockFile(fsd,
 block.getBlockPoolId(), block.getLocalBlock());
-assertEquals("ret: 1, You must specify a meta file with -meta\n",
+assertEquals("ret: 1, You must specify a meta file with -meta",
 runCmd(new String[]{"verify", "-block", blockFile.getAbsolutePath()}));
 File metaFile = getMetaFile(fsd,
 block.getBlockPoolId(), block.getLocalBlock());
 assertEquals("ret: 0, Checksum type: " +
-  "DataChecksum(type=CRC32C, chunkSize=512)\n",
+  "DataChecksum(type=CRC32C, chunkSize=512)",
 runCmd(new String[]{"verify",
 "-meta", metaFile.getAbsolutePath()}));
 assertEquals("ret: 0, Checksum type: " +
-  "DataChecksum(type=CRC32C, chunkSize=512)\n" +
+  "DataChecksum(type=CRC32C, chunkSize=512)" +
   "Checksum verification succeeded on block file " +
-  blockFile.getAbsolutePath() + "\n",
+  blockFile.getAbsolutePath(),
 runCmd(new String[]{"verify",
 "-meta", metaFile.getAbsolutePath(),
 "-block", blockFile.getAbsolutePath()})
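
The essence of the fix is normalizing command output before asserting, so the
expected strings no longer embed a platform-specific newline. A standalone
sketch of the same idea (nothing here is from the patch beyond the replaceAll
call):

    public class LineEndingNormalizer {
      // Strip the platform line separator so "foo\r\n" on Windows and
      // "foo\n" on Unix both compare equal to the bare string "foo".
      static String normalize(String s) {
        return s.replaceAll(System.getProperty("line.separator"), "");
      }

      public static void main(String[] args) {
        String out = "recoverLease SUCCEEDED on /foo" + System.lineSeparator();
        System.out.println(
            normalize(out).equals("recoverLease SUCCEEDED on /foo"));
      }
    }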



[24/50] [abbrv] hadoop git commit: HADOOP-11731. Rework the changelog and releasenotes (aw)

2015-04-06 Thread zjshen
HADOOP-11731. Rework the changelog and releasenotes (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e6b61eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e6b61eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e6b61eb

Branch: refs/heads/YARN-2928
Commit: 0e6b61ebe68350d7c4f1d5eaeb97e8977eab0b2f
Parents: ccf3fd1
Author: Allen Wittenauer a...@apache.org
Authored: Wed Apr 1 17:52:22 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:13 2015 -0700

--
 BUILDING.txt|   7 +-
 dev-support/releasedocmaker.py  | 460 +++
 dev-support/relnotes.py | 274 ---
 hadoop-common-project/hadoop-common/CHANGES.txt |   2 +
 hadoop-common-project/hadoop-common/pom.xml |  51 ++
 hadoop-project/src/site/site.xml|   6 +-
 6 files changed, 518 insertions(+), 282 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e6b61eb/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 02b8610..f3b6853 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -73,7 +73,7 @@ Where to run Maven from?
 
--
 Maven build goals:
 
- * Clean : mvn clean
+ * Clean : mvn clean [-Preleasedocs]
  * Compile   : mvn compile [-Pnative]
  * Run tests : mvn test [-Pnative]
  * Create JAR: mvn package
@@ -84,7 +84,7 @@ Maven build goals:
  * Run clover: mvn test -Pclover 
[-DcloverLicenseLocation=${user.name}/.clover.license]
  * Run Rat   : mvn apache-rat:check
  * Build javadocs: mvn javadoc:javadoc
- * Build distribution: mvn package 
[-Pdist][-Pdocs][-Psrc][-Pnative][-Dtar]
+ * Build distribution: mvn package 
[-Pdist][-Pdocs][-Psrc][-Pnative][-Dtar][-Preleasedocs]
  * Change Hadoop version : mvn versions:set -DnewVersion=NEWVERSION
 
  Build options:
@@ -93,6 +93,7 @@ Maven build goals:
  * Use -Pdocs to generate & bundle the documentation in the distribution 
(using -Pdist)
   * Use -Psrc to create a project source TAR.GZ
   * Use -Dtar to create a TAR with the distribution (using -Pdist)
+  * Use -Preleasedocs to include the changelog and release docs (requires 
Internet connectivity)
 
  Snappy build options:
 
@@ -203,7 +204,7 @@ Create source and binary distributions with native code and 
documentation:
 
 Create a local staging version of the website (in /tmp/hadoop-site)
 
-  $ mvn clean site; mvn site:stage -DstagingDirectory=/tmp/hadoop-site
+  $ mvn clean site -Preleasedocs; mvn site:stage 
-DstagingDirectory=/tmp/hadoop-site
 
 
--
 Installing Hadoop

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e6b61eb/dev-support/releasedocmaker.py
--
diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py
new file mode 100755
index 0000000..b00c1a7
--- /dev/null
+++ b/dev-support/releasedocmaker.py
@@ -0,0 +1,460 @@
+#!/usr/bin/env python
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from glob import glob
+from optparse import OptionParser
+import os
+import re
+import sys
+import urllib
+try:
+  import json
+except ImportError:
+  import simplejson as json
+
+releaseVersion={}
+namePattern = re.compile(r' \([0-9]+\)')
+
+def clean(str):
+  return tableclean(re.sub(namePattern, "", str))
+
+def formatComponents(str):
+  str = re.sub(namePattern, '', str).replace("'", "")
+  if str != "":
+    ret = str
+  else:
+    # some markdown parsers don't like empty tables
+    ret = "."
+  return clean(ret)
+
+# convert to utf-8
+# protect some known md metachars
+# or chars that screw up doxia
+def tableclean(str):
+  str=str.encode('utf-8')
+  str=str.replace("_","\_")
+  

[37/50] [abbrv] hadoop git commit: HDFS-8034. Fix TestDFSClientRetries#testDFSClientConfigurationLocateFollowingBlockInitialDelay for Windows. Contributed by Xiaoyu Yao.

2015-04-06 Thread zjshen
HDFS-8034. Fix 
TestDFSClientRetries#testDFSClientConfigurationLocateFollowingBlockInitialDelay 
for Windows. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/310c997b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/310c997b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/310c997b

Branch: refs/heads/YARN-2928
Commit: 310c997b56c53e660be31809cb4da74114824666
Parents: ff35b52
Author: cnauroth cnaur...@apache.org
Authored: Fri Apr 3 10:10:11 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:15 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  4 +++
 .../hadoop/hdfs/TestDFSClientRetries.java   | 33 +++-
 2 files changed, 22 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/310c997b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 04eac75..366d8fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1366,6 +1366,10 @@ Release 2.7.0 - UNRELEASED
 HDFS-7954. TestBalancer#testBalancerWithPinnedBlocks should not be executed
 on Windows.  (Xiaoyu Yao via szetszwo)
 
+HDFS-8034. Fix
+
TestDFSClientRetries#testDFSClientConfigurationLocateFollowingBlockInitialDelay
+for Windows. (Xiaoyu Yao via cnauroth)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/310c997b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
index 3912774..c4258eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
@@ -1136,20 +1136,23 @@ public class TestDFSClientRetries {
   throws Exception {
 // test if DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY
 // is not configured, verify DFSClient uses the default value 400.
-Configuration dfsConf = new HdfsConfiguration();
-MiniDFSCluster cluster = new MiniDFSCluster.Builder(dfsConf).build();
-cluster.waitActive();
-NamenodeProtocols nn = cluster.getNameNodeRpc();
-DFSClient client = new DFSClient(null, nn, dfsConf, null);
-assertEquals(client.getConf().
-getBlockWriteLocateFollowingInitialDelayMs(), 400);
-
-// change DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY,
-// verify DFSClient uses the configured value 1000.
-dfsConf.setInt(DFSConfigKeys.
-DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY, 1000);
-client = new DFSClient(null, nn, dfsConf, null);
-assertEquals(client.getConf().
-getBlockWriteLocateFollowingInitialDelayMs(), 1000);
+MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+try {
+  cluster.waitActive();
+  NamenodeProtocols nn = cluster.getNameNodeRpc();
+  DFSClient client = new DFSClient(null, nn, conf, null);
+  assertEquals(client.getConf().
+  getBlockWriteLocateFollowingInitialDelayMs(), 400);
+
+  // change DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY,
+  // verify DFSClient uses the configured value 1000.
+  conf.setInt(DFSConfigKeys.
+  DFS_CLIENT_BLOCK_WRITE_LOCATEFOLLOWINGBLOCK_INITIAL_DELAY_KEY, 1000);
+  client = new DFSClient(null, nn, conf, null);
+  assertEquals(client.getConf().
+  getBlockWriteLocateFollowingInitialDelayMs(), 1000);
+} finally {
+  cluster.shutdown();
+}
   }
 }
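
The structural change above is the usual mini-cluster hygiene pattern: shut
the cluster down in a finally block so a failed assertion cannot leak it into
later tests. A minimal sketch (assumes the hadoop-hdfs test dependency):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class MiniClusterPattern {
      static void withCluster() throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          cluster.waitActive();
          // ... exercise the cluster; an AssertionError thrown here still
          // reaches the finally block below ...
        } finally {
          cluster.shutdown();  // releases ports and, on Windows, file locks
        }
      }
    }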



[10/50] [abbrv] hadoop git commit: YARN-3428. Debug log resources to be localized for a container. (kasha)

2015-04-06 Thread zjshen
YARN-3428. Debug log resources to be localized for a container. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b62eb19e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b62eb19e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b62eb19e

Branch: refs/heads/YARN-2928
Commit: b62eb19ef02a23a766e4d8fa5172d57a36aec849
Parents: 0834fe3
Author: Karthik Kambatla ka...@apache.org
Authored: Tue Mar 31 17:34:40 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:10 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  2 ++
 .../localizer/ResourceLocalizationService.java  | 12 ++--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b62eb19e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3cf9847..dab8116 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -145,6 +145,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3258. FairScheduler: Need to add more logging to investigate
 allocations. (Anubhav Dhoot via ozawa)
 
+YARN-3428. Debug log resources to be localized for a container. (kasha)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b62eb19e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index dd50ead..4236392 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -448,6 +448,10 @@ public class ResourceLocalizationService extends 
CompositeService
   .getApplicationId());
   for (LocalResourceRequest req : e.getValue()) {
 tracker.handle(new ResourceRequestEvent(req, e.getKey(), ctxt));
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Localizing " + req.getPath() +
+  " for container " + c.getContainerId());
+}
   }
 }
   }
@@ -456,10 +460,14 @@ public class ResourceLocalizationService extends 
CompositeService
 ResourceRetentionSet retain =
   new ResourceRetentionSet(delService, cacheTargetSize);
 retain.addResources(publicRsrc);
-LOG.debug("Resource cleanup (public) " + retain);
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Resource cleanup (public) " + retain);
+}
 for (LocalResourcesTracker t : privateRsrc.values()) {
   retain.addResources(t);
-  LOG.debug("Resource cleanup " + t.getUser() + ":" + retain);
+  if (LOG.isDebugEnabled()) {
+LOG.debug("Resource cleanup " + t.getUser() + ":" + retain);
+  }
 }
 //TODO Check if appRsrcs should also be added to the retention set.
   }
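
The isDebugEnabled() guard is the classic commons-logging idiom this patch
applies: it skips the string concatenation entirely when debug output is off.
A minimal sketch of the idiom:

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class GuardedDebug {
      private static final Log LOG = LogFactory.getLog(GuardedDebug.class);

      void report(Object req, Object containerId) {
        // Without the guard, the message string is built on every call,
        // even when the debug level is disabled.
        if (LOG.isDebugEnabled()) {
          LOG.debug("Localizing " + req + " for container " + containerId);
        }
      }
    }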



[08/50] [abbrv] hadoop git commit: YARN-3248. Correct fix version from branch-2.7 to branch-2.8 in the change log.

2015-04-06 Thread zjshen
YARN-3248. Correct fix version from branch-2.7 to branch-2.8 in the change log.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f95a2de4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f95a2de4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f95a2de4

Branch: refs/heads/YARN-2928
Commit: f95a2de4a3a1b3846a82e7cd34b1140b7154c0f0
Parents: 40f362b
Author: Xuan xg...@apache.org
Authored: Wed Apr 1 04:32:11 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:10 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f95a2de4/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 461f07d..c48de2c 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -150,6 +150,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3424. Change logs for ContainerMonitorImpl's resourse monitoring
 from info to debug. (Anubhav Dhoot via ozawa)
 
+YARN-3248. Display count of nodes blacklisted by apps in the web UI.
+(Varun Vasudev via xgong)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not
@@ -920,9 +923,6 @@ Release 2.7.0 - UNRELEASED
 removing inconsistencies in the default values. (Junping Du and Karthik
 Kambatla via vinodkv)
 
-YARN-3248. Display count of nodes blacklisted by apps in the web UI.
-(Varun Vasudev via xgong)
-
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES



[43/50] [abbrv] hadoop git commit: HADOOP-11377. Added Null.java without which jdiff completely flops. Contributed by Tsuyoshi Ozawa.

2015-04-06 Thread zjshen
HADOOP-11377. Added Null.java without which jdiff completely flops. Contributed 
by Tsuyoshi Ozawa.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3485a8d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3485a8d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3485a8d7

Branch: refs/heads/YARN-2928
Commit: 3485a8d7b42817ddbf4b13c68bf9c195806c1afc
Parents: e1ffb3e
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Sat Apr 4 13:47:08 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:16 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 .../hadoop-common/dev-support/jdiff/Null.java   | 20 
 .../hadoop-hdfs/dev-support/jdiff/Null.java | 20 
 3 files changed, 43 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3485a8d7/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index d2d1181..fd926aa 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1202,6 +1202,9 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP
 server (brandonli)
+
+HADOOP-11377. Added Null.java without which jdiff completely flops. 
(Tsuyoshi
+Ozawa via vinodkv)
 
 Release 2.6.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3485a8d7/hadoop-common-project/hadoop-common/dev-support/jdiff/Null.java
--
diff --git a/hadoop-common-project/hadoop-common/dev-support/jdiff/Null.java 
b/hadoop-common-project/hadoop-common/dev-support/jdiff/Null.java
new file mode 100644
index 0000000..7b00145
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/dev-support/jdiff/Null.java
@@ -0,0 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class Null {
+  public Null() { }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3485a8d7/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Null.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Null.java 
b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Null.java
new file mode 100644
index 0000000..7b00145
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/dev-support/jdiff/Null.java
@@ -0,0 +1,20 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+public class Null {
+  public Null() { }
+}



[39/50] [abbrv] hadoop git commit: MAPREDUCE-4844. Counters / AbstractCounters have constant references not declared final. (Brahma Reddy Battula via gera)

2015-04-06 Thread zjshen
MAPREDUCE-4844. Counters / AbstractCounters have constant references not 
declared final. (Brahma Reddy Battula via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/def22772
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/def22772
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/def22772

Branch: refs/heads/YARN-2928
Commit: def2277201a7221c8937f25da13801fd56434c7b
Parents: 310c997
Author: Gera Shegalov g...@apache.org
Authored: Fri Apr 3 10:24:02 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:16 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/mapred/Counters.java   | 6 +++---
 .../org/apache/hadoop/mapreduce/counters/AbstractCounters.java | 6 +++---
 3 files changed, 9 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/def22772/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 160c49c..c1eb6c3 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -321,6 +321,9 @@ Release 2.8.0 - UNRELEASED
 causes counter limits are not reset correctly.
 (Zhihai Xu via harsh)
 
+MAPREDUCE-4844. Counters / AbstractCounters have constant references not
+declared final. (Brahma Reddy Battula via gera)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/def22772/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
index b9cb210..c9e0b9f 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Counters.java
@@ -62,9 +62,9 @@ import com.google.common.collect.Iterators;
 public class Counters
 extends AbstractCounters<Counters.Counter, Counters.Group> {
   
-  public static int MAX_COUNTER_LIMIT = Limits.getCountersMax();
-  public static int MAX_GROUP_LIMIT = Limits.getGroupsMax();
-  private static HashMap<String, String> depricatedCounterMap =
+  public static final int MAX_COUNTER_LIMIT = Limits.getCountersMax();
+  public static final int MAX_GROUP_LIMIT = Limits.getGroupsMax();
+  private static final HashMap<String, String> depricatedCounterMap =
   new HashMap<String, String>();
   
   static {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/def22772/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
index dd81ebb..e6e74da 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/counters/AbstractCounters.java
@@ -64,11 +64,11 @@ public abstract class AbstractCounters<C extends Counter,
   /**
* A cache from enum values to the associated counter.
*/
-  private Map<Enum<?>, C> cache = Maps.newIdentityHashMap();
+  private final Map<Enum<?>, C> cache = Maps.newIdentityHashMap();
   //framework & fs groups
-  private Map<String, G> fgroups = new ConcurrentSkipListMap<String, G>();
+  private final Map<String, G> fgroups = new ConcurrentSkipListMap<String, G>();
   // other groups
-  private Map<String, G> groups = new ConcurrentSkipListMap<String, G>();
+  private final Map<String, G> groups = new ConcurrentSkipListMap<String, G>();
   private final CounterGroupFactory<C, G> groupFactory;
 
   // For framework counter serialization without strings



[49/50] [abbrv] hadoop git commit: YARN-3435. AM container to be allocated Appattempt AM container shown as null. Contributed by Bibin A Chundatt

2015-04-06 Thread zjshen
YARN-3435. AM container to be allocated Appattempt AM container shown as
null. Contributed by Bibin A Chundatt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b09b406
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b09b406
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b09b406

Branch: refs/heads/YARN-2928
Commit: 0b09b406c6d09e031357ad84a810c9ddfe4a922d
Parents: 1069dce
Author: Xuan xg...@apache.org
Authored: Sun Apr 5 00:40:57 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:17 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 3 +++
 .../yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b09b406/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 004044a..0c5aac8 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -198,6 +198,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3415. Non-AM containers can be counted towards amResourceUsage of a
 Fair Scheduler queue (Zhihai Xu via Sandy Ryza)
 
+YARN-3435. AM container to be allocated Appattempt AM container shown as 
null.
+(Bibin A Chundatt via xgong)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b09b406/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
index 1831920..506e31f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMAppAttemptBlock.java
@@ -209,7 +209,7 @@ public class RMAppAttemptBlock extends AppAttemptBlock{
 "AM Container:",
 appAttempt.getAmContainerId() == null || containers == null
 || !hasAMContainer(appAttemptReport.getAMContainerId(), containers)
-? null : root_url("container", appAttempt.getAmContainerId()),
+? "N/A" : root_url("container", appAttempt.getAmContainerId()),
 String.valueOf(appAttempt.getAmContainerId()))
   ._("Node:", node)
   ._(



[19/50] [abbrv] hadoop git commit: HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than DFSOutputStream#writeChunk (cmccabe)

2015-04-06 Thread zjshen
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than 
DFSOutputStream#writeChunk (cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f4afce08
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f4afce08
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f4afce08

Branch: refs/heads/YARN-2928
Commit: f4afce0801bc488e301aee820a8091252bbc2748
Parents: f066e73
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Wed Apr 1 13:55:40 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:12 2015 -0700

--
 .../org/apache/hadoop/fs/FSOutputSummer.java| 20 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSOutputStream.java | 15 ---
 .../org/apache/hadoop/tracing/TestTracing.java  |  4 ++--
 4 files changed, 25 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4afce08/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index 13a5e26..d2998b6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.fs;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.DataChecksum;
+import org.apache.htrace.NullScope;
+import org.apache.htrace.TraceScope;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -194,16 +196,26 @@ abstract public class FSOutputSummer extends OutputStream 
{
 return sum.getChecksumSize();
   }
 
+  protected TraceScope createWriteTraceScope() {
+return NullScope.INSTANCE;
+  }
+
   /** Generate checksums for the given data chunks and output chunks & checksums
* to the underlying output stream.
*/
   private void writeChecksumChunks(byte b[], int off, int len)
   throws IOException {
 sum.calculateChunkedSums(b, off, len, checksum, 0);
-for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
-  int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
-  int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
-  writeChunk(b, off + i, chunkLen, checksum, ckOffset, getChecksumSize());
+TraceScope scope = createWriteTraceScope();
+try {
+  for (int i = 0; i < len; i += sum.getBytesPerChecksum()) {
+int chunkLen = Math.min(sum.getBytesPerChecksum(), len - i);
+int ckOffset = i / sum.getBytesPerChecksum() * getChecksumSize();
+writeChunk(b, off + i, chunkLen, checksum, ckOffset,
+getChecksumSize());
+  }
+} finally {
+  scope.close();
 }
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4afce08/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 435fdd7..b5591e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -378,6 +378,9 @@ Release 2.8.0 - UNRELEASED
 
   OPTIMIZATIONS
 
+HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
+DFSOutputStream#writeChunk (cmccabe)
+
   BUG FIXES
 
 HDFS-7501. TransactionsSinceLastCheckpoint can be negative on SBNs.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f4afce08/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 933d8e6..c88639d 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -372,21 +372,14 @@ public class DFSOutputStream extends FSOutputSummer
 }
   }
 
+  protected TraceScope createWriteTraceScope() {
+return dfsClient.getPathTraceScope("DFSOutputStream#write", src);
+  }
+
   // @see FSOutputSummer#writeChunk()
   @Override
   protected synchronized void writeChunk(byte[] b, int offset, int len,
   byte[] checksum, int ckoff, int cklen) 
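
The refactoring is a template-method hook: the base class owns the checksum
loop and asks subclasses for a trace scope, defaulting to a no-op. A
stripped-down sketch of the pattern (htrace 3.x types, as in the diff above;
the chunk loop body is elided):

    import org.apache.htrace.NullScope;
    import org.apache.htrace.TraceScope;

    abstract class TracedWriter {
      // Subclasses (e.g. a DFS stream) override this to open a real scope;
      // the base class itself stays tracing-agnostic.
      protected TraceScope createWriteTraceScope() {
        return NullScope.INSTANCE;
      }

      void writeChunks(int numChunks) {
        TraceScope scope = createWriteTraceScope();
        try {
          for (int i = 0; i < numChunks; i++) {
            // ... write one chunk plus its checksum ...
          }
        } finally {
          scope.close();  // one scope covers the whole multi-chunk write
        }
      }
    }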

[41/50] [abbrv] hadoop git commit: HADOOP-11785. Reduce the number of listStatus operation in distcp buildListing (Zoran Dimitrijevic via Colin P. McCabe)

2015-04-06 Thread zjshen
HADOOP-11785. Reduce the number of listStatus operation in distcp buildListing 
(Zoran Dimitrijevic via Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb13303d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb13303d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb13303d

Branch: refs/heads/YARN-2928
Commit: fb13303de639de8b45c78e91d22c85774ad4252e
Parents: 4aff1e7
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Fri Apr 3 14:08:25 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:16 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../apache/hadoop/tools/SimpleCopyListing.java  | 41 +---
 2 files changed, 21 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb13303d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7dcf8c1..d2d1181 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -483,6 +483,9 @@ Release 2.8.0 - UNRELEASED
 
   OPTIMIZATIONS
 
+HADOOP-11785. Reduce the number of listStatus operation in distcp
+buildListing (Zoran Dimitrijevic via Colin P. McCabe)
+
   BUG FIXES
 
 HADOOP-10027. *Compressor_deflateBytesDirect passes instance instead of

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb13303d/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
--
diff --git 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
index 6dc827a..e8a23aa 100644
--- 
a/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
+++ 
b/hadoop-tools/hadoop-distcp/src/main/java/org/apache/hadoop/tools/SimpleCopyListing.java
@@ -193,12 +193,12 @@ public class SimpleCopyListing extends CopyListing {
 writeToFileListing(fileListWriter, sourceCopyListingStatus,
 sourcePathRoot, options);
 
-if (isDirectoryAndNotEmpty(sourceFS, sourceStatus)) {
+if (sourceStatus.isDirectory()) {
   if (LOG.isDebugEnabled()) {
-LOG.debug("Traversing non-empty source dir: " + sourceStatus.getPath());
+LOG.debug("Traversing source dir: " + sourceStatus.getPath());
   }
-  traverseNonEmptyDirectory(fileListWriter, sourceStatus, sourcePathRoot,
-  options);
+  traverseDirectory(fileListWriter, sourceFS, sourceStatus,
+sourcePathRoot, options);
 }
   }
 }
@@ -275,22 +275,17 @@ public class SimpleCopyListing extends CopyListing {
 
SequenceFile.Writer.compression(SequenceFile.CompressionType.NONE));
   }
 
-  private static boolean isDirectoryAndNotEmpty(FileSystem fileSystem,
-FileStatus fileStatus) throws IOException {
-return fileStatus.isDirectory() && getChildren(fileSystem, fileStatus).length > 0;
-  }
-  }
-
   private static FileStatus[] getChildren(FileSystem fileSystem,
  FileStatus parent) throws IOException 
{
 return fileSystem.listStatus(parent.getPath());
   }
 
-  private void traverseNonEmptyDirectory(SequenceFile.Writer fileListWriter,
- FileStatus sourceStatus,
- Path sourcePathRoot,
- DistCpOptions options)
- throws IOException {
-FileSystem sourceFS = sourcePathRoot.getFileSystem(getConf());
+  private void traverseDirectory(SequenceFile.Writer fileListWriter,
+ FileSystem sourceFS,
+ FileStatus sourceStatus,
+ Path sourcePathRoot,
+ DistCpOptions options)
+ throws IOException {
 final boolean preserveAcls = options.shouldPreserve(FileAttribute.ACL);
 final boolean preserveXAttrs = options.shouldPreserve(FileAttribute.XATTR);
 final boolean preserveRawXattrs = options.shouldPreserveRawXattrs();
@@ -299,9 +294,9 @@ public class SimpleCopyListing extends CopyListing {
 
 while (!pathStack.isEmpty()) {
   for (FileStatus child: getChildren(sourceFS, pathStack.pop())) {
-if (LOG.isDebugEnabled())
-  
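
The saving comes from dropping isDirectoryAndNotEmpty(), which cost one extra
listStatus round-trip per directory just to test for emptiness. A simplified
sketch of the resulting traversal (recursion stands in for the patch's
explicit stack; illustrative only):

    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;

    public class ListOnceTraversal {
      // One listStatus per directory: an empty directory simply yields a
      // zero-length array, so no separate "is it non-empty?" listing is needed.
      static void traverse(FileSystem fs, FileStatus status) throws Exception {
        if (status.isDirectory()) {
          for (FileStatus child : fs.listStatus(status.getPath())) {
            System.out.println(child.getPath());
            traverse(fs, child);
          }
        }
      }
    }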

[21/50] [abbrv] hadoop git commit: HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP server. Contributed by Brandon Li

2015-04-06 Thread zjshen
HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP 
server. Contributed by Brandon Li


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/09686762
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/09686762
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/09686762

Branch: refs/heads/YARN-2928
Commit: 096867629333d4b2de4e7b971b758fbabc11d1d3
Parents: f6d4143
Author: Brandon Li brando...@apache.org
Authored: Wed Apr 1 17:04:44 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:12 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  5 ++-
 .../org/apache/hadoop/mount/MountdBase.java | 26 +--
 .../org/apache/hadoop/nfs/nfs3/Nfs3Base.java| 15 +++--
 .../apache/hadoop/oncrpc/SimpleTcpServer.java   | 31 --
 .../apache/hadoop/oncrpc/SimpleUdpServer.java   | 33 +---
 5 files changed, 82 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/09686762/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 111fb5e..fa98a0c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1186,7 +1186,10 @@ Release 2.7.0 - UNRELEASED
 
 HADOOP-11787. OpensslSecureRandom.c pthread_threadid_np usage signature is
 wrong on 32-bit Mac. (Kiran Kumar M R via cnauroth)
-
+
+HADOOP-11757. NFS gateway should shutdown when it can't start UDP or TCP
+server (brandonli)
+
 Release 2.6.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09686762/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
index 8d7d6dc..92ca7ec 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/mount/MountdBase.java
@@ -60,7 +60,17 @@ abstract public class MountdBase {
 SimpleUdpServer udpServer = new SimpleUdpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
-udpServer.run();
+try {
+  udpServer.run();
+} catch (Throwable e) {
+  LOG.fatal("Failed to start the UDP server.", e);
+  if (udpServer.getBoundPort() > 0) {
+rpcProgram.unregister(PortmapMapping.TRANSPORT_UDP,
+udpServer.getBoundPort());
+  }
+  udpServer.shutdown();
+  terminate(1, e);
+}
 udpBoundPort = udpServer.getBoundPort();
   }
 
@@ -69,7 +79,17 @@ abstract public class MountdBase {
 SimpleTcpServer tcpServer = new SimpleTcpServer(rpcProgram.getPort(),
 rpcProgram, 1);
 rpcProgram.startDaemons();
-tcpServer.run();
+try {
+  tcpServer.run();
+} catch (Throwable e) {
+  LOG.fatal("Failed to start the TCP server.", e);
+  if (tcpServer.getBoundPort() > 0) {
+rpcProgram.unregister(PortmapMapping.TRANSPORT_TCP,
+tcpServer.getBoundPort());
+  }
+  tcpServer.shutdown();
+  terminate(1, e);
+}
 tcpBoundPort = tcpServer.getBoundPort();
   }
 
@@ -83,7 +103,7 @@ abstract public class MountdBase {
 rpcProgram.register(PortmapMapping.TRANSPORT_UDP, udpBoundPort);
 rpcProgram.register(PortmapMapping.TRANSPORT_TCP, tcpBoundPort);
   } catch (Throwable e) {
-LOG.fatal("Failed to start the server. Cause:", e);
+LOG.fatal("Failed to register the MOUNT service.", e);
 terminate(1, e);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/09686762/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
--
diff --git 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
index 40744bc..80faca5 100644
--- 
a/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
+++ 
b/hadoop-common-project/hadoop-nfs/src/main/java/org/apache/hadoop/nfs/nfs3/Nfs3Base.java
@@ -29,7 +29,6 @@ import static org.apache.hadoop.util.ExitUtil.terminate;
 
 /**
  * Nfs server. Supports NFS v3 using {@link RpcProgram}.
- * Currently Mountd program is also started inside this 
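
The control flow added above is fail-fast startup: catch Throwable around the
server start, undo any partial portmap registration, then terminate the
process. A generic sketch of the shape (the interface and names are
illustrative, not the gateway's actual API):

    public class FailFastStartup {
      interface RpcServer {
        void run();           // may throw at bind time
        int getBoundPort();   // > 0 once the socket is bound
        void shutdown();
      }

      static void startOrDie(RpcServer server) {
        try {
          server.run();
        } catch (Throwable t) {
          // Roll back whatever the partially started server already grabbed.
          if (server.getBoundPort() > 0) {
            System.err.println("unregistering port " + server.getBoundPort());
          }
          server.shutdown();
          System.exit(1);  // stand-in for ExitUtil.terminate(1, t)
        }
      }
    }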

[04/50] [abbrv] hadoop git commit: YARN-3412. RM tests should use MockRM where possible. (kasha)

2015-04-06 Thread zjshen
YARN-3412. RM tests should use MockRM where possible. (kasha)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b9d546c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b9d546c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b9d546c

Branch: refs/heads/YARN-2928
Commit: 5b9d546c479dc41356aad99038003349d810f84c
Parents: 8af286f
Author: Karthik Kambatla ka...@apache.org
Authored: Tue Mar 31 09:14:15 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:09 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt   | 2 ++
 .../yarn/server/resourcemanager/TestMoveApplication.java  | 3 +--
 .../yarn/server/resourcemanager/TestResourceManager.java  | 6 ++
 .../server/resourcemanager/monitor/TestSchedulingMonitor.java | 3 ++-
 .../server/resourcemanager/recovery/TestZKRMStateStore.java   | 7 +++
 .../resourcemanager/scheduler/fair/TestFairScheduler.java | 3 +--
 .../scheduler/fair/TestFairSchedulerEventLog.java | 4 ++--
 .../resourcemanager/scheduler/fifo/TestFifoScheduler.java | 5 ++---
 8 files changed, 15 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9d546c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 2466642..3cf9847 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -175,6 +175,8 @@ Release 2.8.0 - UNRELEASED
 YARN-3400. [JDK 8] Build Failure due to unreported exceptions in
 RPCUtil (rkanter)
 
+YARN-3412. RM tests should use MockRM where possible. (kasha)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9d546c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestMoveApplication.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestMoveApplication.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestMoveApplication.java
index 36153de..d2bde80 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestMoveApplication.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestMoveApplication.java
@@ -52,8 +52,7 @@ public class TestMoveApplication {
 FifoSchedulerWithMove.class);
 conf.set(YarnConfiguration.YARN_ADMIN_ACL, " ");
 conf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
-resourceManager = new ResourceManager();
-resourceManager.init(conf);
+resourceManager = new MockRM(conf);
 
resourceManager.getRMContext().getContainerTokenSecretManager().rollMasterKey();
 resourceManager.getRMContext().getNMTokenSecretManager().rollMasterKey();
 resourceManager.start();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b9d546c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
index 6735575..fbf54fc 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
@@ -212,9 +212,8 @@ public class TestResourceManager {
   public void testResourceManagerInitConfigValidation() throws Exception {
 Configuration conf = new YarnConfiguration();
 conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, -1);
-resourceManager = new ResourceManager();
 try {
-  resourceManager.init(conf);
+  resourceManager = new MockRM(conf);
   fail("Exception is 

[15/50] [abbrv] hadoop git commit: HDFS-8009. Signal congestion on the DataNode. Contributed by Haohui Mai.

2015-04-06 Thread zjshen
HDFS-8009. Signal congestion on the DataNode. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d7c0758
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d7c0758
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d7c0758

Branch: refs/heads/YARN-2928
Commit: 0d7c0758b7079ed1660f124d2adcc91ccc9e7289
Parents: a920ab0
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 10:56:53 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:11 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  2 ++
 .../apache/hadoop/hdfs/server/datanode/DataNode.java| 12 ++--
 2 files changed, 12 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d7c0758/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4247ea6..cba53a3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -371,6 +371,8 @@ Release 2.8.0 - UNRELEASED
 HDFS-7671. hdfs user guide should point to the common rack awareness doc.
 (Kai Sasaki via aajisaka)
 
+HDFS-8009. Signal congestion on the DataNode. (wheat9)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d7c0758/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 071aba1..50dccb8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -354,6 +354,9 @@ public class DataNode extends ReconfigurableBase
   private String dnUserName = null;
 
   private SpanReceiverHost spanReceiverHost;
+  private static final int NUM_CORES = Runtime.getRuntime()
+  .availableProcessors();
+  private static final double CONGESTION_RATIO = 1.5;
 
   /**
* Creates a dummy DataNode for testing purpose.
@@ -484,8 +487,13 @@ public class DataNode extends ReconfigurableBase
   * </ul>
*/
   public PipelineAck.ECN getECN() {
-return pipelineSupportECN ? PipelineAck.ECN.SUPPORTED : PipelineAck.ECN
-  .DISABLED;
+if (!pipelineSupportECN) {
+  return PipelineAck.ECN.DISABLED;
+}
+double load = ManagementFactory.getOperatingSystemMXBean()
+.getSystemLoadAverage();
+return load > NUM_CORES * CONGESTION_RATIO ? PipelineAck.ECN.CONGESTED :
+PipelineAck.ECN.SUPPORTED;
   }
 
   /**
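
The congestion heuristic reads the system load average and flags congestion
once it exceeds 1.5x the available cores. A self-contained sketch of that
check (the two constants mirror the diff; everything else is illustrative):

    import java.lang.management.ManagementFactory;

    public class CongestionCheck {
      private static final int NUM_CORES =
          Runtime.getRuntime().availableProcessors();
      private static final double CONGESTION_RATIO = 1.5;

      static boolean isCongested() {
        // getSystemLoadAverage() returns a negative value when the platform
        // (e.g. Windows) cannot report it; that never counts as congested.
        double load = ManagementFactory.getOperatingSystemMXBean()
            .getSystemLoadAverage();
        return load > NUM_CORES * CONGESTION_RATIO;
      }

      public static void main(String[] args) {
        System.out.println("congested: " + isCongested());
      }
    }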



[16/50] [abbrv] hadoop git commit: Add the missing files for HDFS-8009.

2015-04-06 Thread zjshen
Add the missing files for HDFS-8009.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/777f01b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/777f01b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/777f01b5

Branch: refs/heads/YARN-2928
Commit: 777f01b5defa1b53cde21042f5b56a22175e5a87
Parents: 0d7c075
Author: Haohui Mai whe...@apache.org
Authored: Wed Apr 1 10:58:32 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:11 2015 -0700

--
 .../hdfs/server/datanode/TestDataNodeECN.java   | 45 
 1 file changed, 45 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/777f01b5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
new file mode 100644
index 0000000..b994386
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeECN.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
+import org.junit.Assert;
+import org.junit.Test;
+
+import java.io.IOException;
+
+public class TestDataNodeECN {
+  @Test
+  public void testECNFlag() throws IOException {
+Configuration conf = new Configuration();
+conf.setBoolean(DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED, true);
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
+  PipelineAck.ECN ecn = cluster.getDataNodes().get(0).getECN();
+  Assert.assertNotEquals(PipelineAck.ECN.DISABLED, ecn);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+}



[50/50] [abbrv] hadoop git commit: HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl lock for a very long time (sinago via cmccabe)

2015-04-06 Thread zjshen
HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl lock 
for a very long time (sinago via cmccabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8898d14c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8898d14c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8898d14c

Branch: refs/heads/YARN-2928
Commit: 8898d14c00981c48bdf6c575d6604e19adaca7f7
Parents: 37725e7
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon Apr 6 08:54:46 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:17 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 77 +---
 2 files changed, 52 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8898d14c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 6fafec8..52325a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1379,6 +1379,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8051. FsVolumeList#addVolume should release volume reference if not
 put it into BlockScanner. (Lei (Eddy) Xu via Colin P. McCabe)
 
+HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
+lock for a very long time (sinago via cmccabe)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8898d14c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index f15f649..6bcbe5a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -1412,38 +1412,59 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   }
 
   @Override // FsDatasetSpi
-  public synchronized ReplicaHandler createTemporary(
+  public ReplicaHandler createTemporary(
   StorageType storageType, ExtendedBlock b) throws IOException {
-ReplicaInfo replicaInfo = volumeMap.get(b.getBlockPoolId(), b.getBlockId());
-if (replicaInfo != null) {
-  if (replicaInfo.getGenerationStamp() < b.getGenerationStamp()
-       && replicaInfo instanceof ReplicaInPipeline) {
-// Stop the previous writer
-((ReplicaInPipeline)replicaInfo)
-  .stopWriter(datanode.getDnConf().getXceiverStopTimeout());
-invalidate(b.getBlockPoolId(), new Block[]{replicaInfo});
-  } else {
-throw new ReplicaAlreadyExistsException("Block " + b +
- " already exists in state " + replicaInfo.getState() +
- " and thus cannot be created.");
+long startTimeMs = Time.monotonicNow();
+long writerStopTimeoutMs = datanode.getDnConf().getXceiverStopTimeout();
+ReplicaInfo lastFoundReplicaInfo = null;
+do {
+  synchronized (this) {
+ReplicaInfo currentReplicaInfo =
+volumeMap.get(b.getBlockPoolId(), b.getBlockId());
+if (currentReplicaInfo == lastFoundReplicaInfo) {
+  if (lastFoundReplicaInfo != null) {
+invalidate(b.getBlockPoolId(), new Block[] { lastFoundReplicaInfo });
+  }
+  FsVolumeReference ref =
+  volumes.getNextVolume(storageType, b.getNumBytes());
+  FsVolumeImpl v = (FsVolumeImpl) ref.getVolume();
+  // create a temporary file to hold block in the designated volume
+  File f;
+  try {
+f = v.createTmpFile(b.getBlockPoolId(), b.getLocalBlock());
+  } catch (IOException e) {
+IOUtils.cleanup(null, ref);
+throw e;
+  }
+  ReplicaInPipeline newReplicaInfo =
+  new ReplicaInPipeline(b.getBlockId(), b.getGenerationStamp(), v,
+  f.getParentFile(), 0);
+  volumeMap.add(b.getBlockPoolId(), newReplicaInfo);
+  return new ReplicaHandler(newReplicaInfo, ref);
+} else {
+  if (!(currentReplicaInfo.getGenerationStamp() < b.getGenerationStamp() && currentReplicaInfo instanceof 
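
The hunk above is truncated by the archive, but the visible part shows the
shape of the HDFS-7999 fix: the slow stopWriter() call moves outside the
dataset lock, and the lock is retaken to re-check state before the replica is
created. A minimal sketch of that lock-narrowing retry loop, with illustrative
names rather than the real FsDatasetImpl API:

import java.io.IOException;

class LockNarrowingSketch {
  private final Object lock = new Object();
  private Object currentWriter;             // stand-in for volumeMap state

  Object create(long timeoutMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    Object lastSeen = null;
    do {
      Object seen;
      synchronized (lock) {
        seen = currentWriter;
        if (seen == lastSeen) {             // state unchanged since last look
          currentWriter = new Object();     // cheap work stays under the lock
          return currentWriter;
        }
        lastSeen = seen;
      }
      stopWriterOutsideLock(seen);          // slow call, lock not held
    } while (System.currentTimeMillis() < deadline);
    throw new IOException("timed out waiting for the previous writer");
  }

  private void stopWriterOutsideLock(Object writer) throws InterruptedException {
    Thread.sleep(10);                       // stand-in for joining the writer
  }
}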

[02/50] [abbrv] hadoop git commit: HDFS-6945. BlockManager should remove a block from excessReplicateMap and decrement ExcessBlocks metric when the block is removed. (aajisaka)

2015-04-06 Thread zjshen
HDFS-6945. BlockManager should remove a block from excessReplicateMap and 
decrement ExcessBlocks metric when the block is removed. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71b43741
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71b43741
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71b43741

Branch: refs/heads/YARN-2928
Commit: 71b43741dde686725ac188ae91b14185989944a1
Parents: 40a1282
Author: Akira Ajisaka aajis...@apache.org
Authored: Wed Apr 1 09:07:28 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:09 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../server/blockmanagement/BlockManager.java| 22 ++--
 .../namenode/metrics/TestNameNodeMetrics.java   |  9 ++--
 3 files changed, 30 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b43741/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f3537b0..4247ea6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -398,6 +398,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7997. The first non-existing xattr should also throw IOException.
 (zhouyingchao via yliu)
 
+HDFS-6945. BlockManager should remove a block from excessReplicateMap and
+decrement ExcessBlocks metric when the block is removed. (aajisaka)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b43741/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f6e15a3..d9aee62 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3351,8 +3351,7 @@ public class BlockManager {
 // file already removes them from the block map below.
 block.setNumBytes(BlockCommand.NO_ACK);
 addToInvalidates(block);
-corruptReplicas.removeFromCorruptReplicasMap(block);
-blocksMap.removeBlock(block);
+removeBlockFromMap(block);
 // Remove the block from pendingReplications and neededReplications
 pendingReplications.remove(block);
 neededReplications.remove(block, UnderReplicatedBlocks.LEVEL);
@@ -3528,11 +3527,30 @@ public class BlockManager {
   }
 
   public void removeBlockFromMap(Block block) {
+removeFromExcessReplicateMap(block);
 blocksMap.removeBlock(block);
 // If block is removed from blocksMap remove it from corruptReplicasMap
 corruptReplicas.removeFromCorruptReplicasMap(block);
   }
 
+  /**
+   * If a block is removed from blocksMap, remove it from excessReplicateMap.
+   */
+  private void removeFromExcessReplicateMap(Block block) {
+for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
+  String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
+  LightWeightLinkedSet<Block> excessReplicas = excessReplicateMap.get(uuid);
+  if (excessReplicas != null) {
+if (excessReplicas.remove(block)) {
+  excessBlocksCount.decrementAndGet();
+  if (excessReplicas.isEmpty()) {
+excessReplicateMap.remove(uuid);
+  }
+}
+  }
+}
+  }
+
   public int getCapacity() {
 return blocksMap.getCapacity();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b43741/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
index 64ea1e4..b390391 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
@@ -280,11 +280,16 @@ public class TestNameNodeMetrics {
   public void testExcessBlocks() throws Exception {
   

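
The test hunk above is truncated. From the change summary, the new assertion
verifies that the ExcessBlocks gauge drops back to zero once the
over-replicated file is deleted. A hedged sketch of that check, assuming the
usual TestNameNodeMetrics helpers (the registry name "NameNodeActivity" is an
assumption carried over from that test class):

import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;

class ExcessBlocksAssertionSketch {
  private static final String NN_METRICS = "NameNodeActivity";  // assumption

  static void verifyExcessBlocksCleared() {
    // With removeFromExcessReplicateMap() in place, deleting the file should
    // decrement the ExcessBlocks counter all the way back to zero.
    assertGauge("ExcessBlocks", 0L, getMetrics(NN_METRICS));
  }
}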
[34/50] [abbrv] hadoop git commit: HADOOP-11797. releasedocmaker.py needs to put ASF headers on output (aw)

2015-04-06 Thread zjshen
HADOOP-11797. releasedocmaker.py needs to put ASF headers on output (aw)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c134f991
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c134f991
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c134f991

Branch: refs/heads/YARN-2928
Commit: c134f991272f40c87ddeeafb07fb96e801ec53df
Parents: d5d8402
Author: Allen Wittenauer a...@apache.org
Authored: Thu Apr 2 20:50:36 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:15 2015 -0700

--
 dev-support/releasedocmaker.py  | 23 
 hadoop-common-project/hadoop-common/CHANGES.txt |  2 ++
 2 files changed, 25 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c134f991/dev-support/releasedocmaker.py
--
diff --git a/dev-support/releasedocmaker.py b/dev-support/releasedocmaker.py
index b00c1a7..78c8e6b 100755
--- a/dev-support/releasedocmaker.py
+++ b/dev-support/releasedocmaker.py
@@ -30,6 +30,26 @@ except ImportError:
 releaseVersion={}
 namePattern = re.compile(r' \([0-9]+\)')
 
+asflicense='''
+<!---
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+-->
+'''
+
 def clean(str):
  return tableclean(re.sub(namePattern, "", str))
 
@@ -357,6 +377,9 @@ def main():
  "%(ver)s/CHANGES.%(key)s.%(ver)s.md",
  ["HADOOP","HDFS","MAPREDUCE","YARN"], {"ver":maxVersion, "date":reldate})
 
+  reloutputs.writeAll(asflicense)
+  choutputs.writeAll(asflicense)
+
   relhead = '# Hadoop %(key)s %(ver)s Release Notes\n\n' \
 'These release notes cover new developer and user-facing incompatibilities, features, and major improvements.\n\n'
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c134f991/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 260cc83..495c42d 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -431,6 +431,8 @@ Trunk (Unreleased)
 HADOOP-11703. git should ignore .DS_Store files on Mac OS X (Abin Shahab
 via aw)
 
+HADOOP-11797. releasedocmaker.py needs to put ASF headers on output (aw)
+
   OPTIMIZATIONS
 
 HADOOP-7761. Improve the performance of raw comparisons. (todd)



[27/50] [abbrv] hadoop git commit: HDFS-7954. TestBalancer#testBalancerWithPinnedBlocks should not be executed on Windows. Contributed by Xiaoyu Yao

2015-04-06 Thread zjshen
HDFS-7954. TestBalancer#testBalancerWithPinnedBlocks should not be executed on 
Windows.  Contributed by Xiaoyu Yao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0a1dcd17
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0a1dcd17
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0a1dcd17

Branch: refs/heads/YARN-2928
Commit: 0a1dcd17cc585d145ad4ee1e84ac3d3267dd105c
Parents: b98f637
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Apr 2 11:11:34 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:13 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt| 3 +++
 .../org/apache/hadoop/hdfs/server/balancer/TestBalancer.java   | 6 ++
 2 files changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a1dcd17/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0c66309..85c013e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1360,6 +1360,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-8001 RpcProgramNfs3 : wrong parsing of dfs.blocksize
 (Remi Catherinot via brandonli)
 
+HDFS-7954. TestBalancer#testBalancerWithPinnedBlocks should not be executed
+on Windows.  (Xiaoyu Yao via szetszwo)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0a1dcd17/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
index 8b2d11e..9aea283 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
 
 import java.io.File;
 import java.io.IOException;
@@ -309,6 +310,11 @@ public class TestBalancer {
*/
   @Test(timeout=10)
   public void testBalancerWithPinnedBlocks() throws Exception {
+// This test assumes sticky-bit based block pin mechanism available only
+// in Linux/Unix. It can be unblocked on Windows when HDFS-7759 is ready to
+// provide a different mechanism for Windows.
+assumeTrue(!Path.WINDOWS);
+
 final Configuration conf = new HdfsConfiguration();
 initConf(conf);
 conf.setBoolean(DFS_DATANODE_BLOCK_PINNING_ENABLED, true);



[32/50] [abbrv] hadoop git commit: HDFS-7893. Update the POM to create a separate hdfs-client jar. Contributed by Haohui Mai.

2015-04-06 Thread zjshen
HDFS-7893. Update the POM to create a separate hdfs-client jar. Contributed by 
Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/88c14681
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/88c14681
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/88c14681

Branch: refs/heads/YARN-2928
Commit: 88c146814b3431a8eb98c03a0ed706a675058b4e
Parents: e75307b
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 2 15:29:18 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:14 2015 -0700

--
 .../dev-support/findbugsExcludeFile.xml |  2 +
 hadoop-hdfs-project/hadoop-hdfs-client/pom.xml  | 49 
 hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml |  5 ++
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  5 ++
 .../hadoop-hdfs/src/contrib/bkjournal/pom.xml   |  5 ++
 hadoop-hdfs-project/pom.xml |  1 +
 hadoop-project/pom.xml  |  5 ++
 7 files changed, 72 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/88c14681/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
new file mode 100644
index 000..43bc332
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -0,0 +1,2 @@
+<FindBugsFilter>
+</FindBugsFilter>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88c14681/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
new file mode 100644
index 000..900f345
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/pom.xml
@@ -0,0 +1,49 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+    xsi:schemaLocation="http://maven.apache.org/POM/4.0.0
+    http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project-dist</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project-dist</relativePath>
+  </parent>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-hdfs-client</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <description>Apache Hadoop HDFS Client</description>
+  <name>Apache Hadoop HDFS Client</name>
+  <packaging>jar</packaging>
+  <build>
+    <plugins>
+      <plugin>
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+      </plugin>
+      <plugin>
+        <groupId>org.apache.rat</groupId>
+        <artifactId>apache-rat-plugin</artifactId>
+        <configuration>
+          <excludes>
+            <exclude>dev-support/findbugsExcludeFile.xml</exclude>
+          </excludes>
+        </configuration>
+      </plugin>
+    </plugins>
+  </build>
+</project>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88c14681/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
index 9a9d29c..ac8930c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
@@ -58,6 +58,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
     </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdfs-client</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-hdfs</artifactId>
       <scope>test</scope>
       <type>test-jar</type>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/88c14681/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 2d402a9..c11b963 

[03/50] [abbrv] hadoop git commit: Reverted MAPREDUCE-6286, MAPREDUCE-6199, and MAPREDUCE-5875 from branch-2.7. Editing CHANGES.txt to reflect this.

2015-04-06 Thread zjshen
Reverted MAPREDUCE-6286, MAPREDUCE-6199, and MAPREDUCE-5875 from branch-2.7. 
Editing CHANGES.txt to reflect this.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/40a12828
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/40a12828
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/40a12828

Branch: refs/heads/YARN-2928
Commit: 40a1282880788e2dd2a82ad54838f357348117b3
Parents: 5b9d546
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Tue Mar 31 13:29:20 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:09 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt | 20 ++--
 1 file changed, 10 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/40a12828/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index b0367a7..f5d2d1a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -311,6 +311,16 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6294. Remove an extra parameter described in Javadoc of
TokenCache. (Brahma Reddy Battula via ozawa)
 
+MAPREDUCE-5875. Make Counter limits consistent across JobClient, 
+MRAppMaster, and YarnChild. (Gera Shegalov via kasha)
+
+MAPREDUCE-6199. AbstractCounters are not reset completely on
+deserialization (adhoot via rkanter)
+
+MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
+causes counter limits are not reset correctly.
+(Zhihai Xu via harsh)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -411,10 +421,6 @@ Release 2.7.0 - UNRELEASED
 
   BUG FIXES
 
-MAPREDUCE-6286. A typo in HistoryViewer makes some code useless, which
-causes counter limits are not reset correctly.
-(Zhihai Xu via harsh)
-
 MAPREDUCE-6210. Use getApplicationAttemptId() instead of getApplicationId()
 for logging AttemptId in RMContainerAllocator.java (Leitao Guo via 
aajisaka)
 
@@ -448,9 +454,6 @@ Release 2.7.0 - UNRELEASED
 MAPREDUCE-6045. need close the DataInputStream after open it in
 TestMapReduce.java (zxu via rkanter)
 
-MAPREDUCE-6199. AbstractCounters are not reset completely on
-deserialization (adhoot via rkanter)
-
 MAPREDUCE-6206. TestAggregatedTransferRate fails on non-US systems (Jens
 Rabe via jlowe)
 
@@ -721,9 +724,6 @@ Release 2.6.0 - 2014-11-18
 MAPREDUCE-6123. TestCombineFileInputFormat incorrectly starts 2
 MiniDFSCluster instances. (cnauroth)
 
-MAPREDUCE-5875. Make Counter limits consistent across JobClient, 
-MRAppMaster, and YarnChild. (Gera Shegalov via kasha)
-
 MAPREDUCE-6125. TestContainerLauncherImpl sometimes fails (Mit Desai via
 jlowe)
 



[09/50] [abbrv] hadoop git commit: YARN-3424. Change logs for ContainerMonitorImpl's resource monitoring from info to debug. Contributed by Anubhav Dhoot.

2015-04-06 Thread zjshen
YARN-3424. Change logs for ContainerMonitorImpl's resource monitoring from info 
to debug. Contributed by Anubhav Dhoot.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db2998ed
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db2998ed
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db2998ed

Branch: refs/heads/YARN-2928
Commit: db2998ed25c2331a8a42ef50c2ee4c8afdc2a65e
Parents: b62eb19
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Wed Apr 1 17:44:25 2015 +0900
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:10 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt|  3 +++
 .../monitor/ContainersMonitorImpl.java | 13 -
 2 files changed, 11 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db2998ed/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index dab8116..f18cf30 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -147,6 +147,9 @@ Release 2.8.0 - UNRELEASED
 
 YARN-3428. Debug log resources to be localized for a container. (kasha)
 
+YARN-3424. Change logs for ContainerMonitorImpl's resource monitoring
+from info to debug. (Anubhav Dhoot via ozawa)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db2998ed/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 6200efa..bfd6b48 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -519,11 +519,14 @@ public class ContainersMonitorImpl extends 
AbstractService implements
 long curRssMemUsageOfAgedProcesses = pTree.getRssMemorySize(1);
 long vmemLimit = ptInfo.getVmemLimit();
 long pmemLimit = ptInfo.getPmemLimit();
-LOG.info(String.format(
-"Memory usage of ProcessTree %s for container-id %s: ",
- pId, containerId.toString()) +
-formatUsageString(
-currentVmemUsage, vmemLimit, currentPmemUsage, pmemLimit));
+if (LOG.isDebugEnabled()) {
+  LOG.debug(String.format(
+  "Memory usage of ProcessTree %s for container-id %s: ",
+  pId, containerId.toString()) +
+  formatUsageString(
+  currentVmemUsage, vmemLimit,
+  currentPmemUsage, pmemLimit));
+}
 
 // Add usage to container metrics
 if (containerMetricsEnabled) {



[28/50] [abbrv] hadoop git commit: HADOOP-9805. Refactor RawLocalFileSystem#rename for improved testability. Contributed by Jean-Pierre Matsumoto.

2015-04-06 Thread zjshen
HADOOP-9805. Refactor RawLocalFileSystem#rename for improved testability. 
Contributed by Jean-Pierre Matsumoto.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/503e4902
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/503e4902
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/503e4902

Branch: refs/heads/YARN-2928
Commit: 503e4902c88cbb6ddcfb423e8530c431dcdf0a8f
Parents: 88c1468
Author: cnauroth cnaur...@apache.org
Authored: Thu Apr 2 16:13:00 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:14 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 ++
 .../apache/hadoop/fs/RawLocalFileSystem.java| 36 ++-
 .../rawlocal/TestRawlocalContractRename.java| 47 
 3 files changed, 74 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/503e4902/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index c92e378..260cc83 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -476,6 +476,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11660. Add support for hardware crc of HDFS checksums on ARM aarch64
 architecture (Edward Nevill via Colin P. McCabe)
 
+HADOOP-9805. Refactor RawLocalFileSystem#rename for improved testability.
+(Jean-Pierre Matsumoto via cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/503e4902/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
index d7866b8..52623b8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java
@@ -43,7 +43,6 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
-import org.apache.hadoop.io.nativeio.NativeIOException;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.StringUtils;
@@ -347,11 +346,29 @@ public class RawLocalFileSystem extends FileSystem {
   return true;
 }
 
-// Enforce POSIX rename behavior that a source directory replaces an existing
-// destination if the destination is an empty directory.  On most platforms,
-// this is already handled by the Java API call above.  Some platforms
-// (notably Windows) do not provide this behavior, so the Java API call above
-// fails.  Delete destination and attempt rename again.
+// Else try POSIX style rename on Windows only
+if (Shell.WINDOWS &&
+handleEmptyDstDirectoryOnWindows(src, srcFile, dst, dstFile)) {
+  return true;
+}
+
+// The fallback behavior accomplishes the rename by a full copy.
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Falling through to a copy of " + src + " to " + dst);
+}
+return FileUtil.copy(this, src, this, dst, true, getConf());
+  }
+
+  @VisibleForTesting
+  public final boolean handleEmptyDstDirectoryOnWindows(Path src, File srcFile,
+  Path dst, File dstFile) throws IOException {
+
+// Enforce POSIX rename behavior that a source directory replaces an
+// existing destination if the destination is an empty directory. On most
+// platforms, this is already handled by the Java API call above. Some
+// platforms (notably Windows) do not provide this behavior, so the Java API
+// call renameTo(dstFile) fails. Delete destination and attempt rename
+// again.
 if (this.exists(dst)) {
   FileStatus sdst = this.getFileStatus(dst);
   if (sdst.isDirectory()  dstFile.list().length == 0) {
@@ -364,12 +381,7 @@ public class RawLocalFileSystem extends FileSystem {
 }
   }
 }
-
-// The fallback behavior accomplishes the rename by a full copy.
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Falling through to a copy of " + src + " to " + dst);
-}
-return FileUtil.copy(this, src, this, dst, true, getConf());
+return false;
   }
 
   @Override
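
Extracting handleEmptyDstDirectoryOnWindows() behind @VisibleForTesting means
the Windows-only branch can now be driven directly instead of first forcing
File#renameTo to fail. A minimal sketch of a direct call; the paths are
illustrative, and note the Shell.WINDOWS gate stays at the call site, so the
helper simply returns false when dst is absent or not an empty directory:

import java.io.File;
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

class RenameTestabilitySketch {
  public static void main(String[] args) throws Exception {
    RawLocalFileSystem fs = new RawLocalFileSystem();
    fs.initialize(URI.create("file:///"), new Configuration());
    Path src = new Path("/tmp/rename-src");
    Path dst = new Path("/tmp/rename-dst");
    // Drive the extracted branch directly, no rename failure required.
    boolean handled = fs.handleEmptyDstDirectoryOnWindows(
        src, new File(src.toString()), dst, new File(dst.toString()));
    System.out.println("handled=" + handled);
  }
}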


[33/50] [abbrv] hadoop git commit: YARN-2901. Add errors and warning metrics page to RM, NM web UI. (Varun Vasudev via wangda)

2015-04-06 Thread zjshen
YARN-2901. Add errors and warning metrics page to RM, NM web UI. (Varun Vasudev 
via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d5d8402d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d5d8402d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d5d8402d

Branch: refs/heads/YARN-2928
Commit: d5d8402d943a141ab1497d2d0cb8371cd6d3dd78
Parents: 4f66d40
Author: Wangda Tan wan...@apache.org
Authored: Thu Apr 2 17:23:20 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 12:08:15 2015 -0700

--
 .../src/main/conf/log4j.properties  |   9 +
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../util/Log4jWarningErrorMetricsAppender.java  | 447 +++
 .../TestLog4jWarningErrorMetricsAppender.java   | 260 +++
 .../server/webapp/ErrorsAndWarningsBlock.java   | 233 ++
 .../server/nodemanager/webapp/NMController.java |   4 +
 .../webapp/NMErrorsAndWarningsPage.java |  55 +++
 .../server/nodemanager/webapp/NavBlock.java |  24 +-
 .../server/nodemanager/webapp/WebServer.java|   1 +
 .../server/resourcemanager/webapp/NavBlock.java |  27 +-
 .../webapp/RMErrorsAndWarningsPage.java |  54 +++
 .../server/resourcemanager/webapp/RMWebApp.java |   1 +
 .../resourcemanager/webapp/RmController.java|   4 +
 13 files changed, 1114 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5d8402d/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties 
b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
index 316c48e..3a0a3ad 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
+++ b/hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
@@ -266,3 +266,12 @@ log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
 #log4j.appender.nodemanagerrequestlog=org.apache.hadoop.http.HttpRequestLogAppender
 #log4j.appender.nodemanagerrequestlog.Filename=${hadoop.log.dir}/jetty-nodemanager-yyyy_mm_dd.log
 #log4j.appender.nodemanagerrequestlog.RetainDays=3
+
+# Appender for viewing information for errors and warnings
+yarn.ewma.cleanupInterval=300
+yarn.ewma.messageAgeLimitSeconds=86400
+yarn.ewma.maxUniqueMessages=250
+log4j.appender.EWMA=org.apache.hadoop.yarn.util.Log4jWarningErrorMetricsAppender
+log4j.appender.EWMA.cleanupInterval=${yarn.ewma.cleanupInterval}
+log4j.appender.EWMA.messageAgeLimitSeconds=${yarn.ewma.messageAgeLimitSeconds}
+log4j.appender.EWMA.maxUniqueMessages=${yarn.ewma.maxUniqueMessages}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5d8402d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index bcd2286..004044a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -157,6 +157,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3248. Display count of nodes blacklisted by apps in the web UI.
 (Varun Vasudev via xgong)
 
+YARN-2901. Add errors and warning metrics page to RM, NM web UI. 
+(Varun Vasudev via wangda)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d5d8402d/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
new file mode 100644
index 000..0366ae0
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Log4jWarningErrorMetricsAppender.java
@@ -0,0 +1,447 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on 

hadoop git commit: HDFS-7969. Erasure coding: NameNode support for lease recovery of striped block groups. Contributed by Zhe Zhang.

2015-04-06 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 e6ecbaafd - a84274f90


HDFS-7969. Erasure coding: NameNode support for lease recovery of striped block 
groups. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a84274f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a84274f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a84274f9

Branch: refs/heads/HDFS-7285
Commit: a84274f90b5630b5869e03f6bb1d8b0b7f03ae49
Parents: e6ecbaa
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 6 12:52:44 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 6 12:52:44 2015 -0700

--
 .../BlockInfoContiguousUnderConstruction.java   | 33 
 .../BlockInfoStripedUnderConstruction.java  | 80 
 .../BlockInfoUnderConstruction.java | 57 ++
 .../blockmanagement/DatanodeDescriptor.java | 12 +--
 .../server/blockmanagement/DatanodeManager.java | 10 +--
 .../hdfs/server/namenode/FSNamesystem.java  | 24 +++---
 .../TestBlockInfoUnderConstruction.java |  2 +-
 7 files changed, 163 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a84274f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
index 7a052fd..9ba2978 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
@@ -31,7 +31,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 * Represents a block that is currently being constructed.<br>
  * This is usually the last block of a file opened for write or append.
  */
-public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
+public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous
+implements BlockInfoUnderConstruction {
   /** Block state. See {@link BlockUCState} */
   private BlockUCState blockUCState;
 
@@ -94,7 +95,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 return new BlockInfoContiguous(this);
   }
 
-  /** Set expected locations */
+  @Override
   public void setExpectedLocations(DatanodeStorageInfo[] targets) {
 int numLocations = targets == null ? 0 : targets.length;
 this.replicas = new ArrayList<ReplicaUnderConstruction>(numLocations);
@@ -104,10 +105,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 }
   }
 
-  /**
-   * Create array of expected replica locations
-   * (as has been assigned by chooseTargets()).
-   */
+  @Override
   public DatanodeStorageInfo[] getExpectedStorageLocations() {
 int numLocations = replicas == null ? 0 : replicas.size();
 DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations];
@@ -117,7 +115,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 return storages;
   }
 
-  /** Get the number of expected locations */
+  @Override
   public int getNumExpectedLocations() {
 return replicas == null ? 0 : replicas.size();
   }
@@ -135,25 +133,26 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 blockUCState = s;
   }
 
-  /** Get block recovery ID */
+  @Override
   public long getBlockRecoveryId() {
 return blockRecoveryId;
   }
 
-  /** Get recover block */
+  @Override
   public Block getTruncateBlock() {
 return truncateBlock;
   }
 
+  @Override
+  public Block toBlock(){
+return this;
+  }
+
   public void setTruncateBlock(Block recoveryBlock) {
 this.truncateBlock = recoveryBlock;
   }
 
-  /**
-   * Process the recorded replicas. When about to commit or finish the
-   * pipeline recovery sort out bad replicas.
-   * @param genStamp  The final generation stamp for the block.
-   */
+  @Override
   public void setGenerationStampAndVerifyReplicas(long genStamp) {
 // Set the generation stamp for the block.
 setGenerationStamp(genStamp);
@@ -187,11 +186,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
   }
 
-  /**
-   * Initialize lease recovery for this block.
-   * Find the first alive data-node 
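
The point of the new BlockInfoUnderConstruction interface is that lease
recovery in FSNamesystem and DatanodeManager can now handle contiguous and
striped under-construction blocks through a single type. A small sketch using
only methods visible in the diff above (the helper itself is illustrative):

import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;

class UcBlockSketch {
  // Works the same for BlockInfoContiguousUnderConstruction and
  // BlockInfoStripedUnderConstruction once both implement the interface.
  static String describe(BlockInfoUnderConstruction uc) {
    return uc.toBlock() + " (recovery id " + uc.getBlockRecoveryId()
        + ", " + uc.getNumExpectedLocations() + " expected locations)";
  }
}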

[Hadoop Wiki] Update of ContributorsGroup by SteveLoughran

2015-04-06 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The ContributorsGroup page has been changed by SteveLoughran:
https://wiki.apache.org/hadoop/ContributorsGroup?action=diff&rev1=100&rev2=101

Comment:
+JoshBaer

   * JonathanHsieh
   * JonathanSmith
   * joshdsullivan
+  * JoshBaer
   * JoydeepSensarma
   * JunpingDu
   * karthikkambatla


hadoop git commit: YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with error message:Invalid AMRMToken (zxu via rkanter)

2015-04-06 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 28bebc81d - 99b08a748


YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with error 
message:Invalid AMRMToken (zxu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99b08a74
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99b08a74
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99b08a74

Branch: refs/heads/trunk
Commit: 99b08a748e7b00a58b63330b353902a6da6aae27
Parents: 28bebc8
Author: Robert Kanter rkan...@apache.org
Authored: Mon Apr 6 14:11:20 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Mon Apr 6 14:11:20 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt| 3 +++
 .../yarn/server/resourcemanager/security/TestAMRMTokens.java   | 6 ++
 2 files changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b08a74/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 695c4a6..f836f04 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -148,6 +148,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2666. TestFairScheduler.testContinuousScheduling fails Intermittently.
 (Zhihai Xu via ozawa)
 
+YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with
+error message:Invalid AMRMToken (zxu via rkanter)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b08a74/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
index 0be72e3..5dfd092 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
@@ -107,6 +107,12 @@ public class TestAMRMTokens {
   @SuppressWarnings(unchecked)
   @Test
   public void testTokenExpiry() throws Exception {
+conf.setLong(
+YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
+YarnConfiguration.
+DEFAULT_RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS);
+conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,
+YarnConfiguration.DEFAULT_RM_AM_EXPIRY_INTERVAL_MS);
 
 MyContainerManager containerManager = new MyContainerManager();
 final MockRMWithAMS rm =



hadoop git commit: Move YARN-3273 from 2.8 to 2.7.

2015-04-06 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 354d2c75c - 735e4f15a


Move YARN-3273 from 2.8 to 2.7.

(cherry picked from commit 5e9f4099de4b949c838a6224442f335c67d80936)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/735e4f15
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/735e4f15
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/735e4f15

Branch: refs/heads/branch-2
Commit: 735e4f15ab0dcd1eadc160bc8c04905a55d94ca8
Parents: 354d2c7
Author: Zhijie Shen zjs...@apache.org
Authored: Mon Apr 6 12:28:31 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 14:15:23 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/735e4f15/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 7e6e7a4..1e6190b 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -20,9 +20,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3243. CapacityScheduler should pass headroom from parent to children
 to make sure ParentQueue obey its capacity limits. (Wangda Tan via jianhe)
 
-YARN-3273. Improve scheduler UI to facilitate scheduling analysis and
-debugging. (Rohith Sharmaks via jianhe)
-
 YARN-3357. Move TestFifoScheduler to FIFO package. (Rohith Sharmaks 
 via devaraj)
 
@@ -437,6 +434,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2777. Mark the end of individual log in aggregated log.
 (Varun Saxena via xgong)
 
+YARN-3273. Improve scheduler UI to facilitate scheduling analysis and
+debugging. (Rohith Sharmaks via jianhe)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 



hadoop git commit: YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with error message:Invalid AMRMToken (zxu via rkanter)

2015-04-06 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a82708990 - 354d2c75c


YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with error 
message:Invalid AMRMToken (zxu via rkanter)

(cherry picked from commit 99b08a748e7b00a58b63330b353902a6da6aae27)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/354d2c75
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/354d2c75
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/354d2c75

Branch: refs/heads/branch-2
Commit: 354d2c75cf8f48f47c5e1226ae4879952b0e3485
Parents: a827089
Author: Robert Kanter rkan...@apache.org
Authored: Mon Apr 6 14:11:20 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Mon Apr 6 14:11:50 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt| 3 +++
 .../yarn/server/resourcemanager/security/TestAMRMTokens.java   | 6 ++
 2 files changed, 9 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/354d2c75/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index e9d9a37..7e6e7a4 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -100,6 +100,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2666. TestFairScheduler.testContinuousScheduling fails Intermittently.
 (Zhihai Xu via ozawa)
 
+YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with
+error message:Invalid AMRMToken (zxu via rkanter)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/354d2c75/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
index 0be72e3..5dfd092 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestAMRMTokens.java
@@ -107,6 +107,12 @@ public class TestAMRMTokens {
   @SuppressWarnings(unchecked)
   @Test
   public void testTokenExpiry() throws Exception {
+conf.setLong(
+YarnConfiguration.RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS,
+YarnConfiguration.
+DEFAULT_RM_AMRM_TOKEN_MASTER_KEY_ROLLING_INTERVAL_SECS);
+conf.setLong(YarnConfiguration.RM_AM_EXPIRY_INTERVAL_MS,
+YarnConfiguration.DEFAULT_RM_AM_EXPIRY_INTERVAL_MS);
 
 MyContainerManager containerManager = new MyContainerManager();
 final MockRMWithAMS rm =



hadoop git commit: Move YARN-3273 from 2.8 to 2.7.

2015-04-06 Thread zjshen
Repository: hadoop
Updated Branches:
  refs/heads/trunk 99b08a748 - 3fb5abfc8


Move YARN-3273 from 2.8 to 2.7.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fb5abfc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fb5abfc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fb5abfc

Branch: refs/heads/trunk
Commit: 3fb5abfc87953377f86e06578518801a181d7697
Parents: 99b08a7
Author: Zhijie Shen zjs...@apache.org
Authored: Mon Apr 6 12:28:31 2015 -0700
Committer: Zhijie Shen zjs...@apache.org
Committed: Mon Apr 6 14:14:53 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fb5abfc/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f836f04..efb8153 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -68,9 +68,6 @@ Release 2.8.0 - UNRELEASED
 YARN-3243. CapacityScheduler should pass headroom from parent to children
 to make sure ParentQueue obey its capacity limits. (Wangda Tan via jianhe)
 
-YARN-3273. Improve scheduler UI to facilitate scheduling analysis and
-debugging. (Rohith Sharmaks via jianhe)
-
 YARN-3357. Move TestFifoScheduler to FIFO package. (Rohith Sharmaks 
 via devaraj)
 
@@ -482,6 +479,9 @@ Release 2.7.0 - UNRELEASED
 YARN-2777. Mark the end of individual log in aggregated log.
 (Varun Saxena via xgong)
 
+YARN-3273. Improve scheduler UI to facilitate scheduling analysis and
+debugging. (Rohith Sharmaks via jianhe)
+
   OPTIMIZATIONS
 
 YARN-2990. FairScheduler's delay-scheduling always waits for node-local 
and 


