hadoop git commit: HDFS-8071. Redundant checkFileProgress() in PART II of getAdditionalBlock(). Contributed by Konstantin Shvachko.

2015-04-07 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 735e4f15a -> 81b522790


HDFS-8071. Redundant checkFileProgress() in PART II of getAdditionalBlock(). 
Contributed by Konstantin Shvachko.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81b52279
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81b52279
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81b52279

Branch: refs/heads/branch-2
Commit: 81b522790bc59cb05fc5ff58f299a9a9f2eaa4fb
Parents: 735e4f1
Author: Konstantin V Shvachko s...@apache.org
Authored: Mon Apr 6 16:52:52 2015 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Mon Apr 6 22:23:33 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 35 +---
 .../hdfs/server/namenode/TestAddBlockRetry.java | 17 +-
 3 files changed, 34 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81b52279/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e6de8b9..0e3defc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -574,6 +574,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7811. Avoid recursive call getStoragePolicyID in
 INodeFile#computeQuotaUsage. (Xiaoyu Yao and jing9)
 
+HDFS-8071. Redundant checkFileProgress() in PART II of 
getAdditionalBlock().
+(shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81b52279/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 12d3e05..57caeb0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3032,6 +3032,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   FileState fileState = analyzeFileState(
   src, fileId, clientName, previous, onRetryBlock);
   final INodeFile pendingFile = fileState.inode;
+  // Check if the penultimate block is minimally replicated
+  if (!checkFileProgress(src, pendingFile, false)) {
+throw new NotReplicatedYetException("Not replicated yet: " + src);
+  }
   src = fileState.path;
 
      if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
@@ -3244,11 +3248,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
    "last block in file " + lastBlockInFile);
   }
 }
-
-// Check if the penultimate block is minimally replicated
-if (!checkFileProgress(src, pendingFile, false)) {
-  throw new NotReplicatedYetException("Not replicated yet: " + src);
-}
 return new FileState(pendingFile, src, iip);
   }
 
@@ -3550,21 +3549,17 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* replicated.  If not, return false. If checkall is true, then check
* all blocks, otherwise check only penultimate block.
*/
-  private boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
-readLock();
-try {
-  if (checkall) {
-return blockManager.checkBlocksProperlyReplicated(src, v
-.getBlocks());
-  } else {
-// check the penultimate block of this file
-BlockInfoContiguous b = v.getPenultimateBlock();
-return b == null ||
-blockManager.checkBlocksProperlyReplicated(
-src, new BlockInfoContiguous[] { b });
-  }
-} finally {
-  readUnlock();
+  boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
+assert hasReadLock();
+if (checkall) {
+  return blockManager.checkBlocksProperlyReplicated(src, v
+  .getBlocks());
+} else {
+  // check the penultimate block of this file
+  BlockInfoContiguous b = v.getPenultimateBlock();
+  return b == null ||
+  blockManager.checkBlocksProperlyReplicated(
+  src, new BlockInfoContiguous[] { b });
 }
   }
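The change above replaces an internal readLock()/readUnlock() pair in
checkFileProgress() with an assertion that the caller already holds the lock,
since getAdditionalBlock() now performs the check inside its own locked
section. A minimal sketch of that pattern, with hypothetical class and method
names (not Hadoop code):

import java.util.concurrent.locks.ReentrantReadWriteLock;

class ProgressTracker {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  // True if the current thread holds the read lock (or the write lock).
  boolean hasReadLock() {
    return lock.getReadHoldCount() > 0 || lock.isWriteLockedByCurrentThread();
  }

  // Callee: no locking of its own, just an assertion, as in the patch.
  boolean checkProgress() {
    assert hasReadLock();
    return true; // a real check would inspect shared state here
  }

  // Caller: acquires the lock once and runs the check inside it.
  boolean callerPath() {
    lock.readLock().lock();
    try {
      return checkProgress();
    } finally {
      lock.readLock().unlock();
    }
  }
}

Asserting instead of re-acquiring avoids a redundant lock round-trip on the
hot path and documents the locking contract at the call boundary.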
 


hadoop git commit: HDFS-8071. Redundant checkFileProgress() in PART II of getAdditionalBlock(). Contributed by Konstantin Shvachko.

2015-04-07 Thread shv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 64cf07985 -> 2cb9dac9a


HDFS-8071. Redundant checkFileProgress() in PART II of getAdditionalBlock(). 
Contributed by Konstantin Shvachko.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2cb9dac9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2cb9dac9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2cb9dac9

Branch: refs/heads/branch-2.7
Commit: 2cb9dac9a30f67bc8d7749dc05bdd58ce3dff6a4
Parents: 64cf079
Author: Konstantin V Shvachko s...@apache.org
Authored: Mon Apr 6 16:52:52 2015 -0700
Committer: Konstantin V Shvachko s...@apache.org
Committed: Mon Apr 6 23:35:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSNamesystem.java  | 40 +---
 .../hdfs/server/namenode/TestAddBlockRetry.java | 17 -
 3 files changed, 36 insertions(+), 24 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cb9dac9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0d48959..657de32 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -465,6 +465,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7811. Avoid recursive call getStoragePolicyID in
 INodeFile#computeQuotaUsage. (Xiaoyu Yao and jing9)
 
+HDFS-8071. Redundant checkFileProgress() in PART II of 
getAdditionalBlock().
+(shv)
+
   OPTIMIZATIONS
 
 HDFS-7454. Reduce memory footprint for AclEntries in NameNode.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2cb9dac9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d1954f8..f3ae849 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3032,6 +3032,10 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   FileState fileState = analyzeFileState(
   src, fileId, clientName, previous, onRetryBlock);
   final INodeFile pendingFile = fileState.inode;
+  // Check if the penultimate block is minimally replicated
+  if (!checkFileProgress(src, pendingFile, false)) {
+throw new NotReplicatedYetException("Not replicated yet: " + src);
+  }
   src = fileState.path;
 
      if (onRetryBlock[0] != null && onRetryBlock[0].getLocations().length > 0) {
@@ -3244,11 +3248,6 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
    "last block in file " + lastBlockInFile);
   }
 }
-
-// Check if the penultimate block is minimally replicated
-if (!checkFileProgress(src, pendingFile, false)) {
-  throw new NotReplicatedYetException("Not replicated yet: " + src);
-}
 return new FileState(pendingFile, src, iip);
   }
 
@@ -3551,28 +3550,23 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
* replicated.  If not, return false. If checkall is true, then check
* all blocks, otherwise check only penultimate block.
*/
-  private boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
-readLock();
-try {
-  if (checkall) {
-// check all blocks of the file.
-for (BlockInfoContiguous block: v.getBlocks()) {
-  if (!isCompleteBlock(src, block, blockManager.minReplication)) {
-return false;
-  }
-}
-  } else {
-// check the penultimate block of this file
-BlockInfoContiguous b = v.getPenultimateBlock();
-if (b != null
- && !isCompleteBlock(src, b, blockManager.minReplication)) {
+  boolean checkFileProgress(String src, INodeFile v, boolean checkall) {
+if (checkall) {
+  // check all blocks of the file.
+  for (BlockInfoContiguous block: v.getBlocks()) {
+if (!isCompleteBlock(src, block, blockManager.minReplication)) {
   return false;
 }
   }
-  return true;
-} finally {
-  readUnlock();
+} else {
+  // check the penultimate block of this file
+  BlockInfoContiguous b = v.getPenultimateBlock();
+  if (b != null
+   && !isCompleteBlock(src, b, blockManager.minReplication)) {
+return 
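The NotReplicatedYetException thrown above signals that the previous block has
not yet reached minimal replication; HDFS clients typically back off and retry
the block allocation. A hedged sketch of that retry shape, using hypothetical
local types (the real retry logic lives in the client's output stream, not in
this patch):

import java.io.IOException;

class NotReplicatedYetException extends IOException {
  NotReplicatedYetException(String msg) { super(msg); }
}

interface BlockAllocator {
  // Asks the NameNode for a new block; may throw NotReplicatedYetException.
  long addBlock(String src) throws IOException;
}

class RetryingAllocator {
  static long addBlockWithRetry(BlockAllocator nn, String src)
      throws IOException, InterruptedException {
    long sleepMs = 400;                        // illustrative initial backoff
    for (int attempt = 0; attempt < 5; attempt++) {
      try {
        return nn.addBlock(src);
      } catch (NotReplicatedYetException e) {
        Thread.sleep(sleepMs);                 // wait for DataNode block reports
        sleepMs *= 2;                          // exponential backoff
      }
    }
    throw new IOException("Could not allocate a new block for " + src);
  }
}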

[2/2] hadoop git commit: HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by Zhe Zhang. Updated CHANGES-HDFS-EC-7285.txt

2015-04-07 Thread vinayakumarb
HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by 
Zhe Zhang.
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b839dd3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b839dd3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b839dd3b

Branch: refs/heads/HDFS-7285
Commit: b839dd3b094b8842f2da3346859b6cb642c90a48
Parents: b494c30
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 15:35:18 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Apr 7 15:35:18 2015 +0530

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 5 +
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b839dd3b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 68d1d32..7716728 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -33,5 +33,7 @@
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
 ( Xinwei Qin via Kai Zheng )
 
+HADOOP-11740. Combine erasure encoder and decoder interfaces (Zhe Zhang)
+
 HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
 ( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b839dd3b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 3874cb4..9927ccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -49,7 +49,4 @@
 (Hui Zheng via Zhe Zhang)
 
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
-manage EC zones (Zhe Zhang)
-
-HADOOP-11740. Combine erasure encoder and decoder interfaces (Zhe Zhang)
-
+manage EC zones (Zhe Zhang)
\ No newline at end of file



[1/2] hadoop git commit: HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin Updated CHANGES-HDFS-EC-7285.txt

2015-04-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 1ceda4394 -> b839dd3b0


HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b494c304
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b494c304
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b494c304

Branch: refs/heads/HDFS-7285
Commit: b494c304dfbafa330c8ca7ba29e3ca1d7600ecb0
Parents: 1ceda43
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 15:34:37 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Apr 7 15:34:37 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b494c304/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 4e60a7c..3874cb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -48,9 +48,6 @@
 HDFS-7617. Add unit tests for editlog transactions for EC 
 (Hui Zheng via Zhe Zhang)
 
-HADOOP-11782. Correct two thrown messages in ECSchema class
-(Xinwei Qin via Kai Zheng)
-
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
 manage EC zones (Zhe Zhang)
 



[1/2] hadoop git commit: HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng

2015-04-07 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 a84274f90 -> 1ceda4394


HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a948cb76
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a948cb76
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a948cb76

Branch: refs/heads/HDFS-7285
Commit: a948cb76e4c9c9dabcf268040ba191b1b0be1fd7
Parents: a84274f
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Apr 8 01:26:40 2015 +0800
Committer: Kai Zheng kai.zh...@intel.com
Committed: Wed Apr 8 01:26:40 2015 +0800

--
 .../io/erasurecode/coder/RSErasureDecoder.java  |  8 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  4 +-
 .../io/erasurecode/coder/XORErasureDecoder.java | 78 
 .../io/erasurecode/coder/XORErasureEncoder.java | 45 ++
 .../io/erasurecode/coder/XorErasureDecoder.java | 78 
 .../io/erasurecode/coder/XorErasureEncoder.java | 45 --
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  | 69 ---
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  | 78 
 .../rawcoder/JRSRawErasureCoderFactory.java | 34 ---
 .../io/erasurecode/rawcoder/RSRawDecoder.java   | 69 +++
 .../io/erasurecode/rawcoder/RSRawEncoder.java   | 78 
 .../rawcoder/RSRawErasureCoderFactory.java  | 34 +++
 .../io/erasurecode/rawcoder/XORRawDecoder.java  | 81 +
 .../io/erasurecode/rawcoder/XORRawEncoder.java  | 61 +
 .../rawcoder/XORRawErasureCoderFactory.java | 34 +++
 .../io/erasurecode/rawcoder/XorRawDecoder.java  | 81 -
 .../io/erasurecode/rawcoder/XorRawEncoder.java  | 61 -
 .../rawcoder/XorRawErasureCoderFactory.java | 34 ---
 .../erasurecode/coder/TestRSErasureCoder.java   |  4 +-
 .../io/erasurecode/coder/TestXORCoder.java  | 50 +++
 .../io/erasurecode/coder/TestXorCoder.java  | 50 ---
 .../erasurecode/rawcoder/TestJRSRawCoder.java   | 93 
 .../io/erasurecode/rawcoder/TestRSRawCoder.java | 93 
 .../erasurecode/rawcoder/TestXORRawCoder.java   | 49 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   | 51 ---
 25 files changed, 680 insertions(+), 682 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a948cb76/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index ba32f04..e2c5051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -4,9 +4,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.rawcoder.JRSRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
-import org.apache.hadoop.io.erasurecode.rawcoder.XorRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
 
 /**
  * Reed-Solomon erasure decoder that decodes a block group.
@@ -56,7 +56,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
   rsRawDecoder = createRawDecoder(
   CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY);
   if (rsRawDecoder == null) {
-rsRawDecoder = new JRSRawDecoder();
+rsRawDecoder = new RSRawDecoder();
   }
   rsRawDecoder.initialize(getNumDataUnits(),
   getNumParityUnits(), getChunkSize());
@@ -66,7 +66,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
 
   private RawErasureDecoder checkCreateXorRawDecoder() {
 if (xorRawDecoder == null) {
-  xorRawDecoder = new XorRawDecoder();
+  xorRawDecoder = new XORRawDecoder();
   xorRawDecoder.initialize(getNumDataUnits(), 1, getChunkSize());
 }
 return xorRawDecoder;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a948cb76/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
--
diff --git 
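The lazy-creation pattern in checkCreateXorRawDecoder() above (build the raw
coder on first use, prefer a user-configured class, fall back to the renamed
default) can be sketched in plain Java; the types and the config key below are
hypothetical stand-ins, not the Hadoop API:

import java.util.Properties;

class CoderHolder {
  interface RawDecoder {
    void initialize(int dataUnits, int parityUnits, int chunkSize);
  }

  static class XORRawDecoder implements RawDecoder {
    public void initialize(int d, int p, int c) { /* set up XOR state */ }
  }

  private final Properties conf = new Properties();
  private RawDecoder xorRawDecoder;

  RawDecoder checkCreateXorRawDecoder(int dataUnits, int chunkSize) {
    if (xorRawDecoder == null) {                   // create on first use only
      String cls = conf.getProperty("io.erasurecode.codec.xor.rawcoder");
      xorRawDecoder = (cls == null)
          ? new XORRawDecoder()                    // default implementation
          : loadByName(cls);                       // user-configured override
      xorRawDecoder.initialize(dataUnits, 1, chunkSize); // XOR has 1 parity unit
    }
    return xorRawDecoder;
  }

  private RawDecoder loadByName(String cls) {
    try {
      return (RawDecoder) Class.forName(cls).getDeclaredConstructor().newInstance();
    } catch (ReflectiveOperationException e) {
      throw new RuntimeException("Cannot instantiate raw decoder " + cls, e);
    }
  }
}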

[2/2] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-07 Thread drankye
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1ceda439
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1ceda439
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1ceda439

Branch: refs/heads/HDFS-7285
Commit: 1ceda439494e62865f2e87271cfcc1d0052e3240
Parents: a948cb7
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Apr 8 01:31:46 2015 +0800
Committer: Kai Zheng kai.zh...@intel.com
Committed: Wed Apr 8 01:31:46 2015 +0800

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1ceda439/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 01280db..68d1d32 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -32,3 +32,6 @@
 
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
 ( Xinwei Qin via Kai Zheng )
+
+HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
+( Kai Zheng )



hadoop git commit: HADOOP-11645. Erasure Codec API covering the essential aspects for an erasure code ( Contributed by Kai Zheng)

2015-04-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 b839dd3b0 -> bd3f89108


HADOOP-11645. Erasure Codec API covering the essential aspects for an erasure 
code ( Contributed by Kai Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd3f8910
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd3f8910
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd3f8910

Branch: refs/heads/HDFS-7285
Commit: bd3f891085b70ecffa26b64a87a2d0c5ced98ceb
Parents: b839dd3
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 16:05:22 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Apr 7 16:05:22 2015 +0530

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +
 .../hadoop/io/erasurecode/ECBlockGroup.java | 18 
 .../erasurecode/codec/AbstractErasureCodec.java | 88 +++
 .../io/erasurecode/codec/ErasureCodec.java  | 56 
 .../io/erasurecode/codec/RSErasureCodec.java| 38 +
 .../io/erasurecode/codec/XORErasureCodec.java   | 45 ++
 .../erasurecode/coder/AbstractErasureCoder.java |  7 ++
 .../io/erasurecode/coder/ErasureCoder.java  |  7 ++
 .../io/erasurecode/grouper/BlockGrouper.java| 90 
 9 files changed, 352 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd3f8910/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 7716728..c72394e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -37,3 +37,6 @@
 
 HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
 ( Kai Zheng )
+
+HADOOP-11645. Erasure Codec API covering the essential aspects for an 
erasure code
+( Kai Zheng via vinayakumarb )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd3f8910/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
index 2c851a5..0a86907 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
@@ -79,4 +79,22 @@ public class ECBlockGroup {
 return false;
   }
 
+  /**
+   * Get erased blocks count
+   * @return number of erased blocks (data and parity) in this group
+   */
+  public int getErasedCount() {
+int erasedCount = 0;
+
+for (ECBlock dataBlock : dataBlocks) {
+  if (dataBlock.isErased()) erasedCount++;
+}
+
+for (ECBlock parityBlock : parityBlocks) {
+  if (parityBlock.isErased()) erasedCount++;
+}
+
+return erasedCount;
+  }
+
 }
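The getErasedCount() accessor added above yields the one number that decides
recoverability for an MDS code such as Reed-Solomon: a block group is
repairable only while erasures do not exceed the parity count. A hedged helper
sketch (hypothetical, not part of this patch):

  // RS(k, m) tolerates at most m erased blocks across data + parity.
  static boolean isRecoverable(ECBlockGroup group, int numParityUnits) {
    return group.getErasedCount() <= numParityUnits;
  }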

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd3f8910/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
new file mode 100644
index 000..9993786
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.codec;
+
+import org.apache.hadoop.conf.Configured;
+import 

hadoop git commit: HADOOP-11717. Support JWT tokens for web single sign on to the Hadoop servers. (Larry McCay via omalley)

2015-04-07 Thread omalley
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 81b522790 -> c16af2f6f


HADOOP-11717. Support JWT tokens for web single sign on to the Hadoop
servers. (Larry McCay via omalley)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c16af2f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c16af2f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c16af2f6

Branch: refs/heads/branch-2
Commit: c16af2f6f8bc8e45e8d63571c48d0084872f0f1d
Parents: 81b5227
Author: Owen O'Malley omal...@apache.org
Authored: Tue Apr 7 08:09:41 2015 -0700
Committer: Owen O'Malley omal...@apache.org
Committed: Tue Apr 7 08:17:10 2015 -0700

--
 hadoop-common-project/hadoop-auth/pom.xml   |  11 +
 .../JWTRedirectAuthenticationHandler.java   | 363 
 .../authentication/util/CertificateUtil.java|  65 +++
 .../TestJWTRedirectAuthentictionHandler.java| 418 +++
 .../util/TestCertificateUtil.java   |  96 +
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 hadoop-project/pom.xml  |  13 +
 7 files changed, 969 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c16af2f6/hadoop-common-project/hadoop-auth/pom.xml
--
diff --git a/hadoop-common-project/hadoop-auth/pom.xml 
b/hadoop-common-project/hadoop-auth/pom.xml
index 8749af2..ccbfca0 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -113,6 +113,17 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>com.nimbusds</groupId>
+      <artifactId>nimbus-jose-jwt</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.bouncycastle</groupId>
+          <artifactId>bcprov-jdk15on</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
       <groupId>org.apache.directory.server</groupId>
       <artifactId>apacheds-kerberos-codec</artifactId>
       <scope>compile</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c16af2f6/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
new file mode 100644
index 000..42df6a0
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
@@ -0,0 +1,363 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.server;
+
+import java.io.IOException;
+
+import javax.servlet.http.Cookie;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Properties;
+import java.text.ParseException;
+
+import java.io.ByteArrayInputStream;
+import java.io.UnsupportedEncodingException;
+import java.security.PublicKey;
+import java.security.cert.CertificateFactory;
+import java.security.cert.X509Certificate;
+import java.security.cert.CertificateException;
+import java.security.interfaces.RSAPublicKey;
+
+import org.apache.commons.codec.binary.Base64;
+import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import 
org.apache.hadoop.security.authentication.server.AltKerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.authentication.util.CertificateUtil;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.nimbusds.jwt.SignedJWT;
+import com.nimbusds.jose.JOSEException;
+import com.nimbusds.jose.JWSObject;
+import com.nimbusds.jose.JWSVerifier;
+import 
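The nimbus-jose-jwt imports above carry the core of the handler: parse the SSO
cookie as a signed JWT, verify its RSA signature, and check expiry. A hedged,
simplified sketch of those checks (the real handler also validates audiences,
extracts the cookie, and issues redirects):

import java.security.interfaces.RSAPublicKey;
import java.util.Date;

import com.nimbusds.jose.JOSEException;
import com.nimbusds.jose.crypto.RSASSAVerifier;
import com.nimbusds.jwt.SignedJWT;

class JwtCheck {
  static String validate(String serializedJWT, RSAPublicKey publicKey)
      throws Exception {
    SignedJWT jwt = SignedJWT.parse(serializedJWT);        // parse compact form
    if (!jwt.verify(new RSASSAVerifier(publicKey))) {      // RSA signature check
      throw new JOSEException("JWT signature verification failed");
    }
    Date exp = jwt.getJWTClaimsSet().getExpirationTime();  // expiry claim
    if (exp != null && exp.before(new Date())) {
      throw new JOSEException("JWT is expired");
    }
    return jwt.getJWTClaimsSet().getSubject();             // authenticated user
  }
}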

hadoop git commit: YARN-2666. Commit 53959e6

2015-04-07 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7941cc554 -> a12b785ed


YARN-2666. Commit 53959e6

(cherry picked from commit 0b5d7d23c469a82cc28a0ff6a4d27e1b3be91394)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a12b785e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a12b785e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a12b785e

Branch: refs/heads/branch-2
Commit: a12b785edc647f45fc8a4483d7d51f64b91c82be
Parents: 7941cc5
Author: Karthik Kambatla ka...@apache.org
Authored: Tue Apr 7 09:07:57 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Tue Apr 7 09:08:49 2015 -0700

--

--




hadoop git commit: HADOOP-11717. Support JWT tokens for web single sign on to the Hadoop servers. (Larry McCay via omalley)

2015-04-07 Thread omalley
Repository: hadoop
Updated Branches:
  refs/heads/trunk 75c545486 -> ce6357331


HADOOP-11717. Support JWT tokens for web single sign on to the Hadoop
servers. (Larry McCay via omalley)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ce635733
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ce635733
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ce635733

Branch: refs/heads/trunk
Commit: ce635733144456bce6bcf8664c5850ef6b60aa49
Parents: 75c5454
Author: Owen O'Malley omal...@apache.org
Authored: Tue Apr 7 08:09:41 2015 -0700
Committer: Owen O'Malley omal...@apache.org
Committed: Tue Apr 7 08:09:41 2015 -0700

--
 hadoop-common-project/hadoop-auth/pom.xml   |  11 +
 .../JWTRedirectAuthenticationHandler.java   | 363 
 .../authentication/util/CertificateUtil.java|  65 +++
 .../TestJWTRedirectAuthentictionHandler.java| 418 +++
 .../util/TestCertificateUtil.java   |  96 +
 hadoop-common-project/hadoop-common/CHANGES.txt |   3 +
 hadoop-project/pom.xml  |  13 +
 7 files changed, 969 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce635733/hadoop-common-project/hadoop-auth/pom.xml
--
diff --git a/hadoop-common-project/hadoop-auth/pom.xml 
b/hadoop-common-project/hadoop-auth/pom.xml
index 5f7d774..3999d5a 100644
--- a/hadoop-common-project/hadoop-auth/pom.xml
+++ b/hadoop-common-project/hadoop-auth/pom.xml
@@ -108,6 +108,17 @@
       <scope>compile</scope>
     </dependency>
     <dependency>
+      <groupId>com.nimbusds</groupId>
+      <artifactId>nimbus-jose-jwt</artifactId>
+      <scope>compile</scope>
+      <exclusions>
+        <exclusion>
+          <groupId>org.bouncycastle</groupId>
+          <artifactId>bcprov-jdk15on</artifactId>
+        </exclusion>
+      </exclusions>
+    </dependency>
+    <dependency>
       <groupId>org.apache.directory.server</groupId>
       <artifactId>apacheds-kerberos-codec</artifactId>
       <scope>compile</scope>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ce635733/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
new file mode 100644
index 000..42df6a0
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/JWTRedirectAuthenticationHandler.java
@@ -0,0 +1,363 @@
+/**
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License. See accompanying LICENSE file.
+ */
+package org.apache.hadoop.security.authentication.server;
+
+import java.io.IOException;
+
+import javax.servlet.http.Cookie;
+import javax.servlet.ServletException;
+import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
+
+import java.util.ArrayList;
+import java.util.Date;
+import java.util.List;
+import java.util.Properties;
+import java.text.ParseException;
+
+import java.io.ByteArrayInputStream;
+import java.io.UnsupportedEncodingException;
+import java.security.PublicKey;
+import java.security.cert.CertificateFactory;
+import java.security.cert.X509Certificate;
+import java.security.cert.CertificateException;
+import java.security.interfaces.RSAPublicKey;
+
+import org.apache.commons.codec.binary.Base64;
+import 
org.apache.hadoop.security.authentication.client.AuthenticationException;
+import 
org.apache.hadoop.security.authentication.server.AltKerberosAuthenticationHandler;
+import org.apache.hadoop.security.authentication.server.AuthenticationToken;
+import org.apache.hadoop.security.authentication.util.CertificateUtil;
+import org.apache.hadoop.security.authentication.util.KerberosName;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.nimbusds.jwt.SignedJWT;
+import com.nimbusds.jose.JOSEException;
+import com.nimbusds.jose.JWSObject;
+import com.nimbusds.jose.JWSVerifier;
+import 

hadoop git commit: YARN-3110. Few issues in ApplicationHistory web ui. Contributed by Naganarasimha G R

2015-04-07 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk ce6357331 -> 19a4feaf6


YARN-3110. Few issues in ApplicationHistory web ui. Contributed by 
Naganarasimha G R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19a4feaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19a4feaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19a4feaf

Branch: refs/heads/trunk
Commit: 19a4feaf6fcf42ebbfe98b8a7153ade96d37fb14
Parents: ce63573
Author: Xuan xg...@apache.org
Authored: Tue Apr 7 08:22:39 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Tue Apr 7 08:22:39 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt|  2 ++
 .../ApplicationHistoryManagerOnTimelineStore.java  |  8 +---
 .../hadoop/yarn/server/webapp/AppAttemptBlock.java | 13 +++--
 3 files changed, 14 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a4feaf/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index efb8153..278636d 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -148,6 +148,8 @@ Release 2.8.0 - UNRELEASED
 YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with
 error message:Invalid AMRMToken (zxu via rkanter)
 
+YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R via 
xgong)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/19a4feaf/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 49041c7..db00d2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -219,10 +219,11 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 String type = null;
 long createdTime = 0;
 long finishedTime = 0;
+float progress = 0.0f;
 ApplicationAttemptId latestApplicationAttemptId = null;
 String diagnosticsInfo = null;
 FinalApplicationStatus finalStatus = FinalApplicationStatus.UNDEFINED;
-YarnApplicationState state = null;
+YarnApplicationState state = YarnApplicationState.ACCEPTED;
 ApplicationResourceUsageReport appResources = null;
 Map<ApplicationAccessType, String> appViewACLs =
 new HashMap<ApplicationAccessType, String>();
@@ -245,7 +246,7 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 ConverterUtils.toApplicationId(entity.getEntityId()),
 latestApplicationAttemptId, user, queue, name, null, -1, null, 
state,
 diagnosticsInfo, null, createdTime, finishedTime, finalStatus, 
null,
-null, 1.0F, type, null), appViewACLs);
+null, progress, type, null), appViewACLs);
   }
   if 
(entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) {
 queue =
@@ -279,6 +280,7 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
   createdTime = event.getTimestamp();
 } else if (event.getEventType().equals(
 ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) {
+  progress=1.0F;
   finishedTime = event.getTimestamp();
  Map<String, Object> eventInfo = event.getEventInfo();
   if (eventInfo == null) {
@@ -321,7 +323,7 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 ConverterUtils.toApplicationId(entity.getEntityId()),
 latestApplicationAttemptId, user, queue, name, null, -1, null, state,
 diagnosticsInfo, null, createdTime, finishedTime, finalStatus, 
appResources,
-null, 1.0F, type, null), 
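The fix above stops reporting a hardcoded 1.0F progress for every application:
progress now starts at 0 and flips to 1.0 only when a FINISHED event is folded
in. A hedged sketch of that event-folding, with hypothetical types:

import java.util.List;

class ProgressFromEvents {
  enum EventType { CREATED, FINISHED }

  static float progressOf(List<EventType> events) {
    float progress = 0.0f;            // running apps report 0% by default
    for (EventType e : events) {
      if (e == EventType.FINISHED) {
        progress = 1.0f;              // only a FINISHED event implies 100%
      }
    }
    return progress;
  }
}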

hadoop git commit: YARN-3110. Few issues in ApplicationHistory web ui. Contributed by Naganarasimha G R

2015-04-07 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c16af2f6f -> 7941cc554


YARN-3110. Few issues in ApplicationHistory web ui. Contributed by 
Naganarasimha G R

(cherry picked from commit 19a4feaf6fcf42ebbfe98b8a7153ade96d37fb14)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7941cc55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7941cc55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7941cc55

Branch: refs/heads/branch-2
Commit: 7941cc554bfa095aff290c315a61f1b5c0780c32
Parents: c16af2f
Author: Xuan xg...@apache.org
Authored: Tue Apr 7 08:22:39 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Tue Apr 7 08:24:09 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt|  2 ++
 .../ApplicationHistoryManagerOnTimelineStore.java  |  8 +---
 .../hadoop/yarn/server/webapp/AppAttemptBlock.java | 13 +++--
 3 files changed, 14 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7941cc55/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1e6190b..da0a434 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -100,6 +100,8 @@ Release 2.8.0 - UNRELEASED
 YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with
 error message:Invalid AMRMToken (zxu via rkanter)
 
+YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R via 
xgong)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7941cc55/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
index 49041c7..db00d2c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/src/main/java/org/apache/hadoop/yarn/server/applicationhistoryservice/ApplicationHistoryManagerOnTimelineStore.java
@@ -219,10 +219,11 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 String type = null;
 long createdTime = 0;
 long finishedTime = 0;
+float progress = 0.0f;
 ApplicationAttemptId latestApplicationAttemptId = null;
 String diagnosticsInfo = null;
 FinalApplicationStatus finalStatus = FinalApplicationStatus.UNDEFINED;
-YarnApplicationState state = null;
+YarnApplicationState state = YarnApplicationState.ACCEPTED;
 ApplicationResourceUsageReport appResources = null;
 Map<ApplicationAccessType, String> appViewACLs =
 new HashMap<ApplicationAccessType, String>();
@@ -245,7 +246,7 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 ConverterUtils.toApplicationId(entity.getEntityId()),
 latestApplicationAttemptId, user, queue, name, null, -1, null, 
state,
 diagnosticsInfo, null, createdTime, finishedTime, finalStatus, 
null,
-null, 1.0F, type, null), appViewACLs);
+null, progress, type, null), appViewACLs);
   }
   if 
(entityInfo.containsKey(ApplicationMetricsConstants.QUEUE_ENTITY_INFO)) {
 queue =
@@ -279,6 +280,7 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
   createdTime = event.getTimestamp();
 } else if (event.getEventType().equals(
 ApplicationMetricsConstants.FINISHED_EVENT_TYPE)) {
+  progress=1.0F;
   finishedTime = event.getTimestamp();
  Map<String, Object> eventInfo = event.getEventInfo();
   if (eventInfo == null) {
@@ -321,7 +323,7 @@ public class ApplicationHistoryManagerOnTimelineStore 
extends AbstractService
 ConverterUtils.toApplicationId(entity.getEntityId()),
 latestApplicationAttemptId, user, queue, name, null, -1, null, state,
 diagnosticsInfo, null, createdTime, 

hadoop git commit: YARN-3294. Allow dumping of Capacity Scheduler debug logs via web UI for a fixed time period. Contributed by Varun Vasudev

2015-04-07 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/trunk 0b5d7d23c -> d27e9241e


YARN-3294. Allow dumping of Capacity Scheduler debug logs via web UI for
a fixed time period. Contributed by Varun Vasudev


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d27e9241
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d27e9241
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d27e9241

Branch: refs/heads/trunk
Commit: d27e9241e8676a0edb2d35453cac5f9495fcd605
Parents: 0b5d7d2
Author: Xuan xg...@apache.org
Authored: Tue Apr 7 09:52:36 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Tue Apr 7 09:52:36 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../apache/hadoop/yarn/util/AdHocLogDumper.java | 131 +++
 .../hadoop/yarn/util/TestAdHocLogDumper.java|  86 
 .../webapp/CapacitySchedulerPage.java   |  34 +
 .../resourcemanager/webapp/RMWebServices.java   |  26 
 5 files changed, 280 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d27e9241/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 278636d..f2950bf 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -101,6 +101,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2901. Add errors and warning metrics page to RM, NM web UI. 
 (Varun Vasudev via wangda)
 
+YARN-3294. Allow dumping of Capacity Scheduler debug logs via
+web UI for a fixed time period. (Varun Vasudev via xgong)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d27e9241/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
new file mode 100644
index 000..d2e4c74
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.log4j.*;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.*;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class AdHocLogDumper {
+
+  private static final Log LOG = LogFactory.getLog(AdHocLogDumper.class);
+
+  private String name;
+  private String targetFilename;
+  private Map<String, Priority> appenderLevels;
+  private Level currentLogLevel;
+  public static final String AD_HOC_DUMPER_APPENDER = "ad-hoc-dumper-appender";
+  private static boolean logFlag = false;
+  private static final Object lock = new Object();
+
+  public AdHocLogDumper(String name, String targetFilename) {
+this.name = name;
+this.targetFilename = targetFilename;
+appenderLevels = new HashMap<>();
+  }
+
+  public void dumpLogs(String level, int timePeriod)
+  throws YarnRuntimeException, IOException {
+synchronized (lock) {
+  if (logFlag) {
+LOG.info("Attempt to dump logs when appender is already running");
+throw new YarnRuntimeException("Appender is already dumping logs");
+  }
+  Level targetLevel = Level.toLevel(level);
+  Log log = LogFactory.getLog(name);
+  
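A hedged usage sketch of the AdHocLogDumper API shown above; the logger name
and target file are illustrative, and timePeriod is assumed to be milliseconds:

import java.io.IOException;

import org.apache.hadoop.yarn.util.AdHocLogDumper;

public class DumpExample {
  public static void main(String[] args) throws IOException {
    AdHocLogDumper dumper = new AdHocLogDumper(
        "org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity",
        "/tmp/capacity-scheduler-debug.log");
    // Dump DEBUG-level logs for roughly five minutes, then restore levels.
    dumper.dumpLogs("DEBUG", 5 * 60 * 1000);
  }
}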

hadoop git commit: YARN-2666. Commit 53959e6

2015-04-07 Thread kasha
Repository: hadoop
Updated Branches:
  refs/heads/trunk 19a4feaf6 -> 0b5d7d23c


YARN-2666. Commit 53959e6


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b5d7d23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b5d7d23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b5d7d23

Branch: refs/heads/trunk
Commit: 0b5d7d23c469a82cc28a0ff6a4d27e1b3be91394
Parents: 19a4fea
Author: Karthik Kambatla ka...@apache.org
Authored: Tue Apr 7 09:07:57 2015 -0700
Committer: Karthik Kambatla ka...@apache.org
Committed: Tue Apr 7 09:07:57 2015 -0700

--

--




hadoop git commit: YARN-3294. Allow dumping of Capacity Scheduler debug logs via web UI for a fixed time period. Contributed by Varun Vasudev

2015-04-07 Thread xgong
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a12b785ed -> 0522d6970


YARN-3294. Allow dumping of Capacity Scheduler debug logs via web UI for
a fixed time period. Contributed by Varun Vasudev

(cherry picked from commit d27e9241e8676a0edb2d35453cac5f9495fcd605)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0522d697
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0522d697
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0522d697

Branch: refs/heads/branch-2
Commit: 0522d6970dfc780eeeaef1139f8a9a50a4268241
Parents: a12b785
Author: Xuan xg...@apache.org
Authored: Tue Apr 7 09:52:36 2015 -0700
Committer: Xuan xg...@apache.org
Committed: Tue Apr 7 09:54:29 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../apache/hadoop/yarn/util/AdHocLogDumper.java | 131 +++
 .../hadoop/yarn/util/TestAdHocLogDumper.java|  86 
 .../webapp/CapacitySchedulerPage.java   |  34 +
 .../resourcemanager/webapp/RMWebServices.java   |  26 
 5 files changed, 280 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0522d697/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index da0a434..c1f7eb3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -53,6 +53,9 @@ Release 2.8.0 - UNRELEASED
 YARN-2901. Add errors and warning metrics page to RM, NM web UI. 
 (Varun Vasudev via wangda)
 
+YARN-3294. Allow dumping of Capacity Scheduler debug logs via
+web UI for a fixed time period. (Varun Vasudev via xgong)
+
   OPTIMIZATIONS
 
 YARN-3339. TestDockerContainerExecutor should pull a single image and not

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0522d697/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
new file mode 100644
index 000..d2e4c74
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/AdHocLogDumper.java
@@ -0,0 +1,131 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.util;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.commons.logging.impl.Log4JLogger;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.exceptions.YarnRuntimeException;
+import org.apache.log4j.*;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.*;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class AdHocLogDumper {
+
+  private static final Log LOG = LogFactory.getLog(AdHocLogDumper.class);
+
+  private String name;
+  private String targetFilename;
+  private Map<String, Priority> appenderLevels;
+  private Level currentLogLevel;
+  public static final String AD_HOC_DUMPER_APPENDER = "ad-hoc-dumper-appender";
+  private static boolean logFlag = false;
+  private static final Object lock = new Object();
+
+  public AdHocLogDumper(String name, String targetFilename) {
+this.name = name;
+this.targetFilename = targetFilename;
+appenderLevels = new HashMap<>();
+  }
+
+  public void dumpLogs(String level, int timePeriod)
+  throws YarnRuntimeException, IOException {
+synchronized (lock) {
+  if (logFlag) {
+LOG.info("Attempt to dump logs when appender is already running");
+throw new YarnRuntimeException("Appender is already dumping logs");
+  }
+  Level targetLevel = 

hadoop git commit: HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by Zhe Zhang

2015-04-07 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 bd3f89108 -> 621a5fa46


HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by 
Zhe Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/621a5fa4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/621a5fa4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/621a5fa4

Branch: refs/heads/HDFS-7285
Commit: 621a5fa46c6317ead6d3565432919d7b43345231
Parents: bd3f891
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 7 11:17:12 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Apr 7 11:17:12 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  56 ++-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  76 +++-
 .../hadoop/hdfs/DFSStripedInputStream.java  | 367 +++
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 +-
 .../hadoop/hdfs/protocol/LocatedBlock.java  |   4 +
 .../hdfs/protocol/LocatedStripedBlock.java  |   5 +
 .../blockmanagement/BlockInfoStriped.java   |   6 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  80 +++-
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 307 
 .../namenode/TestRecoverStripedBlocks.java  |  88 +
 12 files changed, 894 insertions(+), 107 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/621a5fa4/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9927ccf..21bfc69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -49,4 +49,6 @@
 (Hui Zheng via Zhe Zhang)
 
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
-manage EC zones (Zhe Zhang)
\ No newline at end of file
+manage EC zones (Zhe Zhang)
+
+HDFS-7782. Erasure coding: pread from files in striped layout (Zhe Zhang)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/621a5fa4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 6a82160..bfb1022 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -187,7 +187,6 @@ import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@@ -279,6 +278,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
   new DFSHedgedReadMetrics();
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
+  private static volatile ThreadPoolExecutor STRIPED_READ_THREAD_POOL;
   private final Sampler<?> traceSampler;
 
   /**
@@ -720,6 +720,19 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (numThreads > 0) {
   this.initThreadsNumForHedgedReads(numThreads);
 }
+numThreads = conf.getInt(
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE,
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+if (numThreads <= 0) {
+  LOG.warn("The value of "
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE
+  + " must be greater than 0. The current setting is " + numThreads
+  + ". Reset it to the default value "
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+  numThreads =
+  DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE;
+}
+this.initThreadsNumForStripedReads(numThreads);
 this.saslClient = new SaslDataTransferClient(
   conf, 

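The hunk above is the validate-and-fall-back idiom for sizing the striped
read pool: take the configured value, then warn and substitute the default
when it is not positive. A self-contained sketch of the same idea, with
java.util.Properties standing in for Hadoop's Configuration and a
hypothetical key and default:

import java.util.Properties;

// Sketch: read an int setting; warn and fall back when it is not positive.
class PoolSizeConfig {
  static final String KEY = "striped.read.threads"; // hypothetical key
  static final int DEFAULT = 18;                    // hypothetical default

  static int poolSize(Properties conf) {
    int n = Integer.parseInt(conf.getProperty(KEY, String.valueOf(DEFAULT)));
    if (n <= 0) {
      System.err.println("The value of " + KEY + " must be greater than 0."
          + " The current setting is " + n + ". Reset it to " + DEFAULT);
      n = DEFAULT;
    }
    return n;
  }
}
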
hadoop git commit: HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by Zhe Zhang and Jing Zhao

2015-04-07 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 621a5fa46 -> 196774350 (forced update)


HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by 
Zhe Zhang and Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/19677435
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/19677435
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/19677435

Branch: refs/heads/HDFS-7285
Commit: 196774350191b9aa2f5daa368af869f44aae21c1
Parents: bd3f891
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 7 11:20:13 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Tue Apr 7 11:20:13 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  56 ++-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   6 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  76 +++-
 .../hadoop/hdfs/DFSStripedInputStream.java  | 367 +++
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 +-
 .../hadoop/hdfs/protocol/LocatedBlock.java  |   4 +
 .../hdfs/protocol/LocatedStripedBlock.java  |   5 +
 .../blockmanagement/BlockInfoStriped.java   |   6 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  80 +++-
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 307 
 .../namenode/TestRecoverStripedBlocks.java  |  88 +
 11 files changed, 891 insertions(+), 106 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/19677435/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 6a82160..bfb1022 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -187,7 +187,6 @@ import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.DataTransferSaslUtil;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.protocolPB.PBHelper;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey;
@@ -279,6 +278,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
   new DFSHedgedReadMetrics();
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
+  private static volatile ThreadPoolExecutor STRIPED_READ_THREAD_POOL;
   private final Sampler<?> traceSampler;
 
   /**
@@ -720,6 +720,19 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (numThreads > 0) {
   this.initThreadsNumForHedgedReads(numThreads);
 }
+numThreads = conf.getInt(
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE,
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+if (numThreads <= 0) {
+  LOG.warn("The value of "
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE
+  + " must be greater than 0. The current setting is " + numThreads
+  + ". Reset it to the default value "
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+  numThreads =
+  DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE;
+}
+this.initThreadsNumForStripedReads(numThreads);
 this.saslClient = new SaslDataTransferClient(
   conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
   TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
@@ -3519,6 +3532,43 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  /**
+   * Create thread pool for parallel reading in striped layout,
+   * STRIPED_READ_THREAD_POOL, if it does not already exist.
+   * @param num Number of threads for striped reads thread pool.
+   */
+  private void initThreadsNumForStripedReads(int num) {
+assert num > 0;
+if (STRIPED_READ_THREAD_POOL != null) {
+  return;
+}
+synchronized (DFSClient.class) {
+  if (STRIPED_READ_THREAD_POOL == null) {
+STRIPED_READ_THREAD_POOL = new ThreadPoolExecutor(1, num, 60,
+TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
+new Daemon.DaemonFactory() {
+  private 

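The truncated hunk above creates the shared striped-read pool on first use.
The idiom is double-checked locking on a volatile static field: an
unsynchronized fast-path check, then a second check under the class lock. A
compact sketch under that assumption (pool parameters are illustrative, not
the values used by the patch):

import java.util.concurrent.SynchronousQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

class SharedPool {
  // volatile is what makes the unsynchronized first check safe
  private static volatile ThreadPoolExecutor POOL;

  static ThreadPoolExecutor get(int maxThreads) {
    if (POOL == null) {                    // fast path, no lock taken
      synchronized (SharedPool.class) {
        if (POOL == null) {                // re-check under the lock
          POOL = new ThreadPoolExecutor(1, maxThreads, 60, TimeUnit.SECONDS,
              new SynchronousQueue<Runnable>());
          POOL.allowCoreThreadTimeOut(true);
        }
      }
    }
    return POOL;
  }
}
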
hadoop git commit: HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client implementation. Contributed by Takuya Fukudome

2015-04-07 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 0522d6970 -> a4f0eea26


HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client 
implementation. Contributed by Takuya Fukudome


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a4f0eea2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a4f0eea2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a4f0eea2

Branch: refs/heads/branch-2
Commit: a4f0eea261c5a62c404c331d89a6fdccf25f741e
Parents: 0522d69
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Tue Apr 7 13:59:48 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Tue Apr 7 14:01:55 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/BlockReader.java | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java| 2 ++
 .../main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java  | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java | 3 +++
 .../java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java | 2 ++
 .../main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java| 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSPacket.java   | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java| 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java | 2 ++
 .../hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java  | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java| 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java| 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java   | 2 ++
 15 files changed, 34 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f0eea2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0e3defc..c8c556e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -64,6 +64,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7888. Change DFSOutputStream and DataStreamer for convenience of
 subclassing. (Li Bo via szetszwo)
 
+HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client
+implementation. (Takuya Fukudome via szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f0eea2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
index 7cd2426..aa3e8ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.util.EnumSet;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ByteBufferReadable;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
@@ -28,6 +29,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
  * A BlockReader is responsible for reading a single block
  * from a single datanode.
  */
+@InterfaceAudience.Private
 public interface BlockReader extends ByteBufferReadable {
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a4f0eea2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index 8073ea0..ab93441 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -24,6 +24,7 @@ import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSClient.Conf;

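The annotation itself is purely declarative: it marks a type as Hadoop
internal, signalling that it may change or disappear between releases.
Applying it is a one-liner (hypothetical class for illustration):

import org.apache.hadoop.classification.InterfaceAudience;

// Internal helper: not part of the public HDFS client API.
@InterfaceAudience.Private
class InternalHelper {
  // implementation details are free to change between releases
}
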
[1/2] hadoop git commit: HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in platform-specific format. Contributed by Xiaoyu Yao.

2015-04-07 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a4f0eea26 -> 892152644
  refs/heads/trunk 571a1ce9d -> 1e72d98c6


HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in 
platform-specific format. Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e72d98c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e72d98c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e72d98c

Branch: refs/heads/trunk
Commit: 1e72d98c69bef3526cf0eb617de69e0b6d2fc13c
Parents: 571a1ce
Author: cnauroth cnaur...@apache.org
Authored: Tue Apr 7 13:33:11 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue Apr 7 14:23:01 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../tools/offlineImageViewer/PBImageDelimitedTextWriter.java | 8 +---
 .../hdfs/tools/offlineImageViewer/PBImageTextWriter.java | 7 +--
 3 files changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e72d98c/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3edc80e..51d84f4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1388,6 +1388,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
 lock for a very long time (sinago via cmccabe)
 
+HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in
+platform-specific format. (Xiaoyu Yao via cnauroth)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e72d98c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
index 350967d..fbe7f3a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.text.SimpleDateFormat;
@@ -79,8 +79,10 @@ public class PBImageDelimitedTextWriter extends 
PBImageTextWriter {
   @Override
   public String getEntry(String parent, INode inode) {
 StringBuffer buffer = new StringBuffer();
-String path = new File(parent, inode.getName().toStringUtf8()).toString();
-buffer.append(path);
+String inodeName = inode.getName().toStringUtf8();
+Path path = new Path(parent.isEmpty() ? "/" : parent,
+  inodeName.isEmpty() ? "/" : inodeName);
+buffer.append(path.toString());
 PermissionStatus p = null;
 
 switch (inode.getType()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e72d98c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
index d228920..d2ccc5c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
@@ -21,6 +21,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import 

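The fix replaces java.io.File, whose separator is platform-specific, with
org.apache.hadoop.fs.Path, which always joins components with forward
slashes. A small demonstration of the difference:

import java.io.File;
import org.apache.hadoop.fs.Path;

class PathDemo {
  public static void main(String[] args) {
    // Platform-dependent: "\dir\child" on Windows, "/dir/child" on Linux.
    System.out.println(new File("/dir", "child"));
    // Always HDFS-style forward slashes, on every platform.
    System.out.println(new Path("/dir", "child"));
  }
}
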
[2/3] hadoop git commit: HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows. Contributed by Xiaoyu Yao.

2015-04-07 Thread cnauroth
HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows. 
Contributed by Xiaoyu Yao.

(cherry picked from commit bd77a7c4d94fe8a74b36deb50e19396c98b8908e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6e706118
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6e706118
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6e706118

Branch: refs/heads/branch-2
Commit: 6e70611819e81eb0690dda00b9789ed250f3959c
Parents: 8921526
Author: cnauroth cnaur...@apache.org
Authored: Tue Apr 7 14:47:21 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue Apr 7 14:47:41 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/security/TestShellBasedIdMapping.java  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e706118/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 83d6bec..a08caa7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -790,6 +790,9 @@ Release 2.7.0 - UNRELEASED
 Ozawa via vinodkv)
 
 HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. (Li Lu via 
vinodkv)
+
+HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows.
+(Xiaoyu Yao via cnauroth)
 
 Release 2.6.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6e706118/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
index e6e1d73..3b533d2 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
@@ -129,6 +129,7 @@ public class TestShellBasedIdMapping {
   // Test staticMap refreshing
   @Test
   public void testStaticMapUpdate() throws IOException {
+assumeTrue(!Shell.WINDOWS);
 File tempStaticMapFile = File.createTempFile("nfs-", ".map");
 tempStaticMapFile.delete();
 Configuration conf = new Configuration();


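assumeTrue comes from JUnit's Assume: when the condition is false the test
is reported as skipped rather than failed, which is exactly what the fix
wants on Windows. A standalone sketch, using an os.name check in place of
Hadoop's Shell.WINDOWS constant:

import static org.junit.Assume.assumeTrue;

import org.junit.Test;

public class SkipOnWindowsTest {
  private static final boolean WINDOWS =
      System.getProperty("os.name").startsWith("Windows");

  @Test
  public void posixOnlyBehavior() {
    assumeTrue(!WINDOWS);  // skipped, not failed, on Windows
    // ... assertions that only hold on POSIX platforms ...
  }
}
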

[3/3] hadoop git commit: HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows. Contributed by Xiaoyu Yao.

2015-04-07 Thread cnauroth
HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows. 
Contributed by Xiaoyu Yao.

(cherry picked from commit bd77a7c4d94fe8a74b36deb50e19396c98b8908e)
(cherry picked from commit 6e70611819e81eb0690dda00b9789ed250f3959c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6ad39e4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6ad39e4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6ad39e4

Branch: refs/heads/branch-2.7
Commit: c6ad39e406cdf351af6048a494162082492d1880
Parents: 5021f52
Author: cnauroth cnaur...@apache.org
Authored: Tue Apr 7 14:47:21 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue Apr 7 14:47:52 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/security/TestShellBasedIdMapping.java  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6ad39e4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index f008348..0f75ed2 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -723,6 +723,9 @@ Release 2.7.0 - UNRELEASED
 Ozawa via vinodkv)
 
 HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. (Li Lu via 
vinodkv)
+
+HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows.
+(Xiaoyu Yao via cnauroth)
 
 Release 2.6.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6ad39e4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
index e6e1d73..3b533d2 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
@@ -129,6 +129,7 @@ public class TestShellBasedIdMapping {
   // Test staticMap refreshing
   @Test
   public void testStaticMapUpdate() throws IOException {
+assumeTrue(!Shell.WINDOWS);
 File tempStaticMapFile = File.createTempFile("nfs-", ".map");
 tempStaticMapFile.delete();
 Configuration conf = new Configuration();



[1/3] hadoop git commit: HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows. Contributed by Xiaoyu Yao.

2015-04-07 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 892152644 -> 6e7061181
  refs/heads/branch-2.7 5021f52f7 -> c6ad39e40
  refs/heads/trunk 1e72d98c6 -> bd77a7c4d


HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows. 
Contributed by Xiaoyu Yao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bd77a7c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bd77a7c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bd77a7c4

Branch: refs/heads/trunk
Commit: bd77a7c4d94fe8a74b36deb50e19396c98b8908e
Parents: 1e72d98
Author: cnauroth cnaur...@apache.org
Authored: Tue Apr 7 14:47:21 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue Apr 7 14:47:21 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../java/org/apache/hadoop/security/TestShellBasedIdMapping.java  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd77a7c4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 5a8cda4..67050e7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1210,6 +1210,9 @@ Release 2.7.0 - UNRELEASED
 Ozawa via vinodkv)
 
 HADOOP-11776. Fixed the broken JDiff support in Hadoop 2. (Li Lu via 
vinodkv)
+
+HADOOP-11796. Skip TestShellBasedIdMapping.testStaticMapUpdate on Windows.
+(Xiaoyu Yao via cnauroth)
 
 Release 2.6.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bd77a7c4/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
index e6e1d73..3b533d2 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestShellBasedIdMapping.java
@@ -129,6 +129,7 @@ public class TestShellBasedIdMapping {
   // Test staticMap refreshing
   @Test
   public void testStaticMapUpdate() throws IOException {
+assumeTrue(!Shell.WINDOWS);
 File tempStaticMapFile = File.createTempFile("nfs-", ".map");
 tempStaticMapFile.delete();
 Configuration conf = new Configuration();



hadoop git commit: HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in platform-specific format. Contributed by Xiaoyu Yao.

2015-04-07 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 2cb9dac9a -> 5021f52f7


HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in 
platform-specific format. Contributed by Xiaoyu Yao.

(cherry picked from commit 672ed462965ea7da10ce3df48c02e2a3bd13f0ae)
(cherry picked from commit 950dc1b1995191327e6b919ee17a87e59b749264)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5021f52f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5021f52f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5021f52f

Branch: refs/heads/branch-2.7
Commit: 5021f52f7bf65c3002e1c1767060aa728847c533
Parents: 2cb9dac
Author: cnauroth cnaur...@apache.org
Authored: Tue Apr 7 13:33:11 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue Apr 7 13:33:38 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../tools/offlineImageViewer/PBImageDelimitedTextWriter.java | 8 +---
 .../hdfs/tools/offlineImageViewer/PBImageTextWriter.java | 7 +--
 3 files changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5021f52f/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 657de32..c36ea82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -971,6 +971,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
 lock for a very long time (sinago via cmccabe)
 
+HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in
+platform-specific format. (Xiaoyu Yao via cnauroth)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5021f52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
index 350967d..fbe7f3a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.text.SimpleDateFormat;
@@ -79,8 +79,10 @@ public class PBImageDelimitedTextWriter extends 
PBImageTextWriter {
   @Override
   public String getEntry(String parent, INode inode) {
 StringBuffer buffer = new StringBuffer();
-String path = new File(parent, inode.getName().toStringUtf8()).toString();
-buffer.append(path);
+String inodeName = inode.getName().toStringUtf8();
+Path path = new Path(parent.isEmpty() ? "/" : parent,
+  inodeName.isEmpty() ? "/" : inodeName);
+buffer.append(path.toString());
 PermissionStatus p = null;
 
 switch (inode.getType()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5021f52f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
index d228920..d2ccc5c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
@@ -21,6 +21,7 @@ import 

hadoop git commit: HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client implementation. Contributed by Takuya Fukudome

2015-04-07 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/trunk d27e9241e -> 571a1ce9d


HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client 
implementation. Contributed by Takuya Fukudome


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/571a1ce9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/571a1ce9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/571a1ce9

Branch: refs/heads/trunk
Commit: 571a1ce9d037d99e7c9042bcb77ae7a2c4daf6d3
Parents: d27e924
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Tue Apr 7 13:59:48 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Tue Apr 7 13:59:48 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/BlockReader.java | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java| 2 ++
 .../main/java/org/apache/hadoop/hdfs/BlockReaderLocalLegacy.java  | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/BlockReaderUtil.java | 3 +++
 .../java/org/apache/hadoop/hdfs/CorruptFileBlockIterator.java | 2 ++
 .../main/java/org/apache/hadoop/hdfs/DFSHedgedReadMetrics.java| 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/DFSPacket.java   | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/DataStreamer.java| 3 +++
 .../src/main/java/org/apache/hadoop/hdfs/ExtendedBlockId.java | 2 ++
 .../hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HAUtil.java  | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/KeyProviderCache.java| 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/LeaseRenewer.java| 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/NameNodeProxies.java | 2 ++
 .../src/main/java/org/apache/hadoop/hdfs/RemotePeerFactory.java   | 2 ++
 15 files changed, 34 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/571a1ce9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7d20060..3edc80e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -379,6 +379,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7888. Change DFSOutputStream and DataStreamer for convenience of
 subclassing. (Li Bo via szetszwo)
 
+HDFS-8049. Add @InterfaceAudience.Private annotation to hdfs client
+implementation. (Takuya Fukudome via szetszwo)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/571a1ce9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
index 7cd2426..aa3e8ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReader.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.util.EnumSet;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ByteBufferReadable;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
@@ -28,6 +29,7 @@ import org.apache.hadoop.hdfs.shortcircuit.ClientMmap;
  * A BlockReader is responsible for reading a single block
  * from a single datanode.
  */
+@InterfaceAudience.Private
 public interface BlockReader extends ByteBufferReadable {
   
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/571a1ce9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
index 8073ea0..ab93441 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/BlockReaderLocal.java
@@ -24,6 +24,7 @@ import java.util.EnumSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ReadOption;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSClient.Conf;
@@ 

[2/2] hadoop git commit: HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in platform-specific format. Contributed by Xiaoyu Yao.

2015-04-07 Thread cnauroth
HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in 
platform-specific format. Contributed by Xiaoyu Yao.

(cherry picked from commit 672ed462965ea7da10ce3df48c02e2a3bd13f0ae)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/89215264
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/89215264
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/89215264

Branch: refs/heads/branch-2
Commit: 89215264405afbe79f8ffe5fe3e47331cab44d94
Parents: a4f0eea
Author: cnauroth cnaur...@apache.org
Authored: Tue Apr 7 13:33:11 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Tue Apr 7 14:23:29 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../tools/offlineImageViewer/PBImageDelimitedTextWriter.java | 8 +---
 .../hdfs/tools/offlineImageViewer/PBImageTextWriter.java | 7 +--
 3 files changed, 13 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/89215264/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index c8c556e..16002a6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1087,6 +1087,9 @@ Release 2.7.0 - UNRELEASED
 HDFS-7999. FsDatasetImpl#createTemporary sometimes holds the FSDatasetImpl
 lock for a very long time (sinago via cmccabe)
 
+HDFS-8038. PBImageDelimitedTextWriter#getEntry output HDFS path in
+platform-specific format. (Xiaoyu Yao via cnauroth)
+
 BREAKDOWN OF HDFS-7584 SUBTASKS AND RELATED JIRAS
 
   HDFS-7720. Quota by Storage Type API, tools and ClientNameNode

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89215264/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
index 350967d..fbe7f3a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageDelimitedTextWriter.java
@@ -17,13 +17,13 @@
  */
 package org.apache.hadoop.hdfs.tools.offlineImageViewer;
 
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INode;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeDirectory;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeFile;
 import 
org.apache.hadoop.hdfs.server.namenode.FsImageProto.INodeSection.INodeSymlink;
 
-import java.io.File;
 import java.io.IOException;
 import java.io.PrintStream;
 import java.text.SimpleDateFormat;
@@ -79,8 +79,10 @@ public class PBImageDelimitedTextWriter extends 
PBImageTextWriter {
   @Override
   public String getEntry(String parent, INode inode) {
 StringBuffer buffer = new StringBuffer();
-String path = new File(parent, inode.getName().toStringUtf8()).toString();
-buffer.append(path);
+String inodeName = inode.getName().toStringUtf8();
+Path path = new Path(parent.isEmpty() ? "/" : parent,
+  inodeName.isEmpty() ? "/" : inodeName);
+buffer.append(path.toString());
 PermissionStatus p = null;
 
 switch (inode.getType()) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/89215264/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
index d228920..d2ccc5c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageTextWriter.java
@@ -21,6 +21,7 @@ import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
+import 

hadoop git commit: YARN-3429. Fix incorrect CHANGES.txt

2015-04-07 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6e7061181 -> b584ce6c0


YARN-3429. Fix incorrect CHANGES.txt

(cherry picked from commit 5b8a3ae366294aec492f69f1a429aa7fce5d13be)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b584ce6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b584ce6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b584ce6c

Branch: refs/heads/branch-2
Commit: b584ce6c0aba76382fec43eb2386227b163a883d
Parents: 6e70611
Author: Robert Kanter rkan...@apache.org
Authored: Tue Apr 7 16:15:42 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Tue Apr 7 16:16:08 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b584ce6c/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index c1f7eb3..a4673bd 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -100,7 +100,7 @@ Release 2.8.0 - UNRELEASED
 YARN-2666. TestFairScheduler.testContinuousScheduling fails Intermittently.
 (Zhihai Xu via ozawa)
 
-YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with
+YARN-3429. TestAMRMTokens.testTokenExpiry fails Intermittently with
 error message:"Invalid AMRMToken" (zxu via rkanter)
 
 YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R via 
xgong)



hadoop git commit: YARN-3429. Fix incorrect CHANGES.txt

2015-04-07 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk bd77a7c4d -> 5b8a3ae36


YARN-3429. Fix incorrect CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b8a3ae3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b8a3ae3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b8a3ae3

Branch: refs/heads/trunk
Commit: 5b8a3ae366294aec492f69f1a429aa7fce5d13be
Parents: bd77a7c
Author: Robert Kanter rkan...@apache.org
Authored: Tue Apr 7 16:15:42 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Tue Apr 7 16:15:42 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b8a3ae3/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index f2950bf..01d3429 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -148,7 +148,7 @@ Release 2.8.0 - UNRELEASED
 YARN-2666. TestFairScheduler.testContinuousScheduling fails Intermittently.
 (Zhihai Xu via ozawa)
 
-YARN-2429. TestAMRMTokens.testTokenExpiry fails Intermittently with
+YARN-3429. TestAMRMTokens.testTokenExpiry fails Intermittently with
 error message:"Invalid AMRMToken" (zxu via rkanter)
 
 YARN-3110. Few issues in ApplicationHistory web ui. (Naganarasimha G R via 
xgong)



hadoop git commit: HADOOP-11801. Update BUILDING.txt for Ubuntu. (Contributed by Gabor Liptak)

2015-04-07 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 c6ad39e40 -> ffa3f3a10


HADOOP-11801. Update BUILDING.txt for Ubuntu. (Contributed by Gabor Liptak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ffa3f3a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ffa3f3a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ffa3f3a1

Branch: refs/heads/branch-2.7
Commit: ffa3f3a1002dd39e807b4abc3686d204be4620f2
Parents: c6ad39e
Author: Arpit Agarwal a...@apache.org
Authored: Tue Apr 7 18:06:05 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Tue Apr 7 18:07:11 2015 -0700

--
 BUILDING.txt| 10 ++
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 2 files changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffa3f3a1/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 02b8610..b30b30e 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -28,14 +28,8 @@ Installing required packages for clean install of Ubuntu 
14.04 LTS Desktop:
   $ sudo apt-get -y install maven
 * Native libraries
   $ sudo apt-get -y install build-essential autoconf automake libtool cmake 
zlib1g-dev pkg-config libssl-dev
-* ProtocolBuffer 2.5.0
-  $ wget 
https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz
-  $ tar -zxvf protobuf-2.5.0.tar.gz
-  $ cd protobuf-2.5.0.tar.gz
-  $ ./configure
-  $ make
-  $ sudo make install
-  $ sudo ldconfig
+* ProtocolBuffer 2.5.0 (required)
+  $ sudo apt-get -y install libprotobuf-dev protobuf-compiler
 
 Optional packages:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ffa3f3a1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0f75ed2..a4605ac 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -244,6 +244,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-10670. Allow AuthenticationFilters to load secret from signature
 secret files. (Kai Zheng via wheat9)
 
+HADOOP-11801. Update BUILDING.txt for Ubuntu. (Gabor Liptak via
+Arpit Agarwal)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.



[2/2] hadoop git commit: HADOOP-11801. Update BUILDING.txt for Ubuntu. (Contributed by Gabor Liptak)

2015-04-07 Thread arp
HADOOP-11801. Update BUILDING.txt for Ubuntu. (Contributed by Gabor Liptak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d3657a5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d3657a5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d3657a5

Branch: refs/heads/branch-2
Commit: 8d3657a5779d5ed5a8f95ca8012888d86104e5fa
Parents: b584ce6
Author: Arpit Agarwal a...@apache.org
Authored: Tue Apr 7 18:06:05 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Tue Apr 7 18:06:14 2015 -0700

--
 BUILDING.txt| 10 ++
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 2 files changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d3657a5/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index 02b8610..b30b30e 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -28,14 +28,8 @@ Installing required packages for clean install of Ubuntu 
14.04 LTS Desktop:
   $ sudo apt-get -y install maven
 * Native libraries
   $ sudo apt-get -y install build-essential autoconf automake libtool cmake 
zlib1g-dev pkg-config libssl-dev
-* ProtocolBuffer 2.5.0
-  $ wget 
https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz
-  $ tar -zxvf protobuf-2.5.0.tar.gz
-  $ cd protobuf-2.5.0.tar.gz
-  $ ./configure
-  $ make
-  $ sudo make install
-  $ sudo ldconfig
+* ProtocolBuffer 2.5.0 (required)
+  $ sudo apt-get -y install libprotobuf-dev protobuf-compiler
 
 Optional packages:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d3657a5/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a08caa7..be39765 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -311,6 +311,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-10670. Allow AuthenticationFilters to load secret from signature
 secret files. (Kai Zheng via wheat9)
 
+HADOOP-11801. Update BUILDING.txt for Ubuntu. (Gabor Liptak via
+Arpit Agarwal)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.



[1/2] hadoop git commit: HADOOP-11801. Update BUILDING.txt for Ubuntu. (Contributed by Gabor Liptak)

2015-04-07 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b584ce6c0 -> 8d3657a57
  refs/heads/trunk 5b8a3ae36 -> 5449adc9e


HADOOP-11801. Update BUILDING.txt for Ubuntu. (Contributed by Gabor Liptak)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5449adc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5449adc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5449adc9

Branch: refs/heads/trunk
Commit: 5449adc9e5fa0607b27caacd0f7aafc18c100975
Parents: 5b8a3ae
Author: Arpit Agarwal a...@apache.org
Authored: Tue Apr 7 18:06:05 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Tue Apr 7 18:06:05 2015 -0700

--
 BUILDING.txt| 10 ++
 hadoop-common-project/hadoop-common/CHANGES.txt |  3 +++
 2 files changed, 5 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5449adc9/BUILDING.txt
--
diff --git a/BUILDING.txt b/BUILDING.txt
index f3b6853..3ca9fae 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -28,14 +28,8 @@ Installing required packages for clean install of Ubuntu 
14.04 LTS Desktop:
   $ sudo apt-get -y install maven
 * Native libraries
   $ sudo apt-get -y install build-essential autoconf automake libtool cmake 
zlib1g-dev pkg-config libssl-dev
-* ProtocolBuffer 2.5.0
-  $ wget 
https://github.com/google/protobuf/releases/download/v2.5.0/protobuf-2.5.0.tar.gz
-  $ tar -zxvf protobuf-2.5.0.tar.gz
-  $ cd protobuf-2.5.0.tar.gz
-  $ ./configure
-  $ make
-  $ sudo make install
-  $ sudo ldconfig
+* ProtocolBuffer 2.5.0 (required)
+  $ sudo apt-get -y install libprotobuf-dev protobuf-compiler
 
 Optional packages:
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5449adc9/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 67050e7..412bad7 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -747,6 +747,9 @@ Release 2.7.0 - UNRELEASED
 HADOOP-10670. Allow AuthenticationFilters to load secret from signature
 secret files. (Kai Zheng via wheat9)
 
+HADOOP-11801. Update BUILDING.txt for Ubuntu. (Gabor Liptak via
+Arpit Agarwal)
+
   OPTIMIZATIONS
 
 HADOOP-11323. WritableComparator#compare keeps reference to byte array.



hadoop git commit: HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be easily overrided. (Contributed by Walter Su)

2015-04-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4be648b55 -> d505c8acd


HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be 
easily overrided. (Contributed by Walter Su)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d505c8ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d505c8ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d505c8ac

Branch: refs/heads/trunk
Commit: d505c8acd30d6f40d0632fe9c93c886a4499a9fc
Parents: 4be648b
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 09:56:37 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Apr 8 09:56:37 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../BlockPlacementPolicyDefault.java| 87 
 2 files changed, 54 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d505c8ac/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f194bd7..ac508cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -385,6 +385,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to
 HdfsClientConfigKeys.Retry.  (szetszwo)
 
+HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it
+can be easily overrided. (Walter Su via vinayakumarb)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d505c8ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 3262772..09db986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -333,41 +333,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
+ " unavailableStorages=" + unavailableStorages
+ ", storagePolicy=" + storagePolicy);
   }
-
-  if (numOfResults == 0) {
-writer = chooseLocalStorage(writer, excludedNodes, blocksize,
-maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
-.getDatanodeDescriptor();
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor();
-  if (numOfResults >= 1) {
-chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
-results, avoidStaleNodes, storageTypes);
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  if (numOfResults >= 2) {
-final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor();
-if (clusterMap.isOnSameRack(dn0, dn1)) {
-  chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-} else if (newBlock){
-  chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-} else {
-  chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-}
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
-  maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+  writer = chooseTargetInOrder(numOfReplicas, writer, excludedNodes, blocksize,
+  maxNodesPerRack, results, avoidStaleNodes, newBlock, storageTypes);
 } catch (NotEnoughReplicasException e) {
   final String message = "Failed to place enough replicas, still in need of "
   + (totalReplicasExpected - results.size()) + " to reach "
@@ -422,7 +389,55 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 }
 return writer;
   }
-
+
+  protected Node chooseTargetInOrder(int numOfReplicas, 
+ Node writer,
+ 

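The refactor pulls the replica-ordering steps out of chooseTarget into a
protected chooseTargetInOrder hook, so a subclass can replace just the
ordering while inheriting the surrounding retry and error handling. The real
parameter list is truncated above, so the sketch below shows the pattern with
a simplified, hypothetical signature rather than the actual API:

import java.util.List;

// Template-method pattern: the base class keeps the driver logic,
// subclasses override only the ordering step. All names are illustrative.
class BasePlacementPolicy {
  final List<String> chooseTarget(int replicas, List<String> candidates) {
    // shared validation, logging and fallback handling would live here
    return chooseTargetInOrder(replicas, candidates);
  }

  protected List<String> chooseTargetInOrder(int replicas,
      List<String> candidates) {
    return candidates.subList(0, Math.min(replicas, candidates.size()));
  }
}

class RackAwarePolicy extends BasePlacementPolicy {
  @Override
  protected List<String> chooseTargetInOrder(int replicas,
      List<String> candidates) {
    // impose a different ordering without copying the driver logic
    return super.chooseTargetInOrder(replicas, candidates);
  }
}
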
hadoop git commit: HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be easily overrided. (Contributed by Walter Su)

2015-04-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eb3ca514f -> 3cf7ac181


HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be 
easily overrided. (Contributed by Walter Su)

(cherry picked from commit d505c8acd30d6f40d0632fe9c93c886a4499a9fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cf7ac18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cf7ac18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cf7ac18

Branch: refs/heads/branch-2
Commit: 3cf7ac181ba9c6a31d4452d05d796901c5ed5517
Parents: eb3ca51
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 09:56:37 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Apr 8 09:57:49 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../BlockPlacementPolicyDefault.java| 87 
 2 files changed, 54 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf7ac18/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 687d7d5..e969644 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -70,6 +70,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to
 HdfsClientConfigKeys.Retry.  (szetszwo)
 
+HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it
+can be easily overrided. (Walter Su via vinayakumarb)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf7ac18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 3262772..09db986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -333,41 +333,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
+ " unavailableStorages=" + unavailableStorages
+ ", storagePolicy=" + storagePolicy);
   }
-
-  if (numOfResults == 0) {
-writer = chooseLocalStorage(writer, excludedNodes, blocksize,
-maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
-.getDatanodeDescriptor();
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor();
-  if (numOfResults >= 1) {
-chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
-results, avoidStaleNodes, storageTypes);
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  if (numOfResults >= 2) {
-final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor();
-if (clusterMap.isOnSameRack(dn0, dn1)) {
-  chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-} else if (newBlock){
-  chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-} else {
-  chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-}
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
-  maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+  writer = chooseTargetInOrder(numOfReplicas, writer, excludedNodes, blocksize,
+  maxNodesPerRack, results, avoidStaleNodes, newBlock, storageTypes);
 } catch (NotEnoughReplicasException e) {
   final String message = "Failed to place enough replicas, still in need of "
   + (totalReplicasExpected - results.size()) + " to reach "
@@ -422,7 +389,55 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 }
 return writer;
   }
-
+
+  protected Node chooseTargetInOrder(int numOfReplicas, 
+

hadoop git commit: HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a package local class. Contributed by Haohui Mai.

2015-04-07 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk d505c8acd -> ab04ff9ef


HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a 
package local class. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab04ff9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab04ff9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab04ff9e

Branch: refs/heads/trunk
Commit: ab04ff9efe632b4eca6faca7407ac35e00e6a379
Parents: d505c8a
Author: Haohui Mai whe...@apache.org
Authored: Tue Apr 7 21:23:52 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue Apr 7 21:30:57 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 444 +
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 485 +++
 .../hadoop/hdfs/web/WebHdfsConstants.java   |  30 ++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  28 +-
 .../namenode/ha/TestDelegationTokensWithHA.java |  92 
 .../apache/hadoop/hdfs/web/TestJsonUtil.java|  14 +-
 .../hadoop/hdfs/web/TestWebHDFSForHA.java   |  84 
 .../hadoop/hdfs/web/TestWebHdfsTokens.java  |   3 +-
 9 files changed, 625 insertions(+), 558 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab04ff9e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ac508cb..84e382a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -388,6 +388,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it
 can be easily overrided. (Walter Su via vinayakumarb)
 
+HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a
+package local class. (wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab04ff9e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index d53bc31..252b0f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -21,34 +21,22 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectReader;
 
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.*;
 
 /** JSON Utilities */
 public class JsonUtil {
   private static final Object[] EMPTY_OBJECT_ARRAY = {};
-  private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
 
   /** Convert a token object to a Json string. */
  public static String toJsonString(final Token<? extends TokenIdentifier> token
@@ -67,34 +55,6 @@ public class JsonUtil {
 return m;
   }
 
-  /** Convert a Json map to a Token. */
-  public static Token<? extends TokenIdentifier> toToken(
-  final Map<?, ?> m) throws IOException {
-if (m == null) {
-  return null;
-}
-
-final Token<DelegationTokenIdentifier> token
-= new Token<DelegationTokenIdentifier>();
-token.decodeFromUrlString((String)m.get("urlString"));
-return token;
-  }
-
-  /** Convert a Json map to a Token of DelegationTokenIdentifier. */
-  @SuppressWarnings("unchecked")
-  public 
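
The diff truncates here; per the commit summary, the removed token routines move into the new package-local JsonUtilClient. A hedged sketch of how the relocated helper plausibly looks there, reassembled from the deleted lines above (not the verbatim commit):

  /** Convert a Json map to a Token. */
  static Token<? extends TokenIdentifier> toToken(
      final Map<?, ?> m) throws IOException {
    if (m == null) {
      return null;
    }
    final Token<DelegationTokenIdentifier> token
        = new Token<DelegationTokenIdentifier>();
    // WebHDFS transports tokens in their URL-safe encoded string form
    token.decodeFromUrlString((String) m.get("urlString"));
    return token;
  }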

hadoop git commit: HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a package local class. Contributed by Haohui Mai.

2015-04-07 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 3cf7ac181 -> f9fbde307


HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a 
package local class. Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f9fbde30
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f9fbde30
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f9fbde30

Branch: refs/heads/branch-2
Commit: f9fbde3074a70ff484bea2fe8b4ecb008fe0288d
Parents: 3cf7ac1
Author: Haohui Mai whe...@apache.org
Authored: Tue Apr 7 21:23:52 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Tue Apr 7 21:30:23 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../org/apache/hadoop/hdfs/web/JsonUtil.java| 445 +
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  | 485 +++
 .../hadoop/hdfs/web/WebHdfsConstants.java   |  30 ++
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |  28 +-
 .../namenode/ha/TestDelegationTokensWithHA.java |  92 
 .../apache/hadoop/hdfs/web/TestJsonUtil.java|  14 +-
 .../hadoop/hdfs/web/TestWebHDFSForHA.java   |  84 
 .../hadoop/hdfs/web/TestWebHdfsTokens.java  |   3 +-
 9 files changed, 625 insertions(+), 559 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9fbde30/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e969644..8290bcc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -73,6 +73,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it
 can be easily overrided. (Walter Su via vinayakumarb)
 
+HDFS-8080. Separate JSON related routines used by WebHdfsFileSystem to a
+package local class. (wheat9)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f9fbde30/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index 4f36965..e25720d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -21,34 +21,22 @@ import org.apache.hadoop.fs.*;
 import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.AclStatus;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.*;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
-import org.apache.hadoop.hdfs.server.namenode.INodeId;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
-import org.codehaus.jackson.map.ObjectReader;
 
 import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 
-import java.io.ByteArrayInputStream;
-import java.io.DataInputStream;
 import java.io.IOException;
 import java.util.*;
 
 /** JSON Utilities */
 public class JsonUtil {
   private static final Object[] EMPTY_OBJECT_ARRAY = {};
-  private static final DatanodeInfo[] EMPTY_DATANODE_INFO_ARRAY = {};
 
   /** Convert a token object to a Json string. */
  public static String toJsonString(final Token<? extends TokenIdentifier> token
@@ -67,34 +55,6 @@ public class JsonUtil {
 return m;
   }
 
-  /** Convert a Json map to a Token. */
-  public static Token<? extends TokenIdentifier> toToken(
-  final Map<?, ?> m) throws IOException {
-if (m == null) {
-  return null;
-}
-
-final Token<DelegationTokenIdentifier> token
-= new Token<DelegationTokenIdentifier>();
-token.decodeFromUrlString((String)m.get("urlString"));
-return token;
-  }
-
-  /** Convert a Json map to a Token of DelegationTokenIdentifier. */
-  @SuppressWarnings("unchecked")
-  public 

hadoop git commit: HDFS-5215. dfs.datanode.du.reserved is not considered while computing available space ( Brahma Reddy Battula via Yongjun Zhang)

2015-04-07 Thread yjzhangal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 8d3657a57 -> b76c321e9


HDFS-5215. dfs.datanode.du.reserved is not considered while computing
available space ( Brahma Reddy Battula via Yongjun Zhang)

(cherry picked from commit 66763bb06f107f0e072c773a5feb25903c688ddc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b76c321e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b76c321e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b76c321e

Branch: refs/heads/branch-2
Commit: b76c321e94d99659a166659d827b922e04363179
Parents: 8d3657a
Author: Yongjun Zhang yzh...@cloudera.com
Authored: Tue Apr 7 19:31:58 2015 -0700
Committer: Yongjun Zhang yzh...@cloudera.com
Committed: Tue Apr 7 19:36:02 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |  6 +++---
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 16 
 3 files changed, 18 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b76c321e/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 16002a6..672ac80 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -106,6 +106,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
 goes for infinite loop (vinayakumarb)
 
+HDFS-5215. dfs.datanode.du.reserved is not considered while computing
+available space ( Brahma Reddy Battula via Yongjun Zhang)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b76c321e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index 6af7d92..ce0ba36 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -2494,9 +2494,9 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
*/
   private static class VolumeInfo {
 final String directory;
-final long usedSpace;
-final long freeSpace;
-final long reservedSpace;
+final long usedSpace; // size of space used by HDFS
+final long freeSpace; // size of free space excluding reserved space
+final long reservedSpace; // size of space reserved for non-HDFS and RBW
 
 VolumeInfo(FsVolumeImpl v, long usedSpace, long freeSpace) {
   this.directory = v.toString();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b76c321e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
index 9b0d40f..e80c174 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsVolumeImpl.java
@@ -305,9 +305,11 @@ public class FsVolumeImpl implements FsVolumeSpi {
   }
   
   /**
-   * Calculate the capacity of the filesystem, after removing any
-   * reserved capacity.
-   * @return the unreserved number of bytes left in this filesystem. May be zero.
+   * Return either the configured capacity of the file system if configured; or
+   * the capacity of the file system excluding space reserved for non-HDFS.
+   * 
+   * @return the unreserved number of bytes left in this filesystem. May be
+   * zero.
*/
   @VisibleForTesting
   public long getCapacity() {
@@ -329,10 +331,16 @@ public class FsVolumeImpl implements FsVolumeSpi {
 this.configuredCapacity = capacity;
   }
 
+  /*
+   * Calculate the available space of the filesystem, excluding space reserved
+   * for non-HDFS and space reserved for RBW
+   * 
+   * @return the available number of bytes 
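
The message truncates mid-javadoc. Per the summary, the bug was that available-space reporting ignored dfs.datanode.du.reserved; the fix subtracts the reserved bytes. A hedged sketch of the adjusted method (the field names usage, reserved, and reservedForRbw are assumptions following FsVolumeImpl's surrounding code, not the verbatim commit):

  @Override
  public long getAvailable() throws IOException {
    // space HDFS may still use under the configured capacity
    long remaining = getCapacity() - getDfsUsed() - reservedForRbw.get();
    // actual free disk space, minus dfs.datanode.du.reserved bytes
    // and space already promised to replicas being written (RBW)
    long available = usage.getAvailable() - reserved - reservedForRbw.get();
    if (remaining > available) {
      remaining = available;
    }
    return (remaining > 0) ? remaining : 0;
  }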

hadoop git commit: HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to HdfsClientConfigKeys.Retry.

2015-04-07 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/trunk 66763bb06 -> 4be648b55


HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to 
HdfsClientConfigKeys.Retry.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4be648b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4be648b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4be648b5

Branch: refs/heads/trunk
Commit: 4be648b55c1ce8743f6e0ea1683168e9ed9c3ee4
Parents: 66763bb
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Tue Apr 7 19:48:57 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Tue Apr 7 19:48:57 2015 -0700

--
 .../hdfs/client/HdfsClientConfigKeys.java   |  56 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  27 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 325 +++
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |  28 +-
 .../apache/hadoop/hdfs/client/HdfsUtils.java|   3 +-
 .../hadoop/hdfs/TestBlockMissingException.java  |   3 +-
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java |   3 +-
 .../hadoop/hdfs/TestClientReportBadBlock.java   |   3 +-
 .../apache/hadoop/hdfs/TestCrcCorruption.java   |  12 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   |  11 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java|   5 +-
 .../hadoop/hdfs/TestEncryptedTransfer.java  |   4 +-
 .../hadoop/hdfs/TestMissingBlocksAlert.java |   4 +-
 .../java/org/apache/hadoop/hdfs/TestPread.java  |   5 +-
 .../datatransfer/sasl/TestSaslDataTransfer.java |  11 +-
 .../blockmanagement/TestBlockTokenWithDFS.java  |   3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   9 +-
 .../namenode/TestListCorruptFileBlocks.java |   5 +-
 .../ha/TestFailoverWithBlockTokensEnabled.java  |   3 +-
 20 files changed, 309 insertions(+), 214 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4be648b5/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
new file mode 100644
index 000..cf2d50a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+/** Client configuration properties */
+public interface HdfsClientConfigKeys {
+  static final String PREFIX = "dfs.client.";
+
+  /** Client retry configuration properties */
+  public interface Retry {
+    static final String PREFIX = HdfsClientConfigKeys.PREFIX + "retry.";
+
+    public static final String  POLICY_ENABLED_KEY
+        = PREFIX + "policy.enabled";
+    public static final boolean POLICY_ENABLED_DEFAULT
+        = false;
+    public static final String  POLICY_SPEC_KEY
+        = PREFIX + "policy.spec";
+    public static final String  POLICY_SPEC_DEFAULT
+        = "1,6,6,10"; //t1,n1,t2,n2,...
+
+    public static final String  TIMES_GET_LAST_BLOCK_LENGTH_KEY
+        = PREFIX + "times.get-last-block-length";
+    public static final int     TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT
+        = 3;
+    public static final String  INTERVAL_GET_LAST_BLOCK_LENGTH_KEY
+        = PREFIX + "interval-ms.get-last-block-length";
+    public static final int     INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT
+        = 4000;
+
+    public static final String  MAX_ATTEMPTS_KEY
+        = PREFIX + "max.attempts";
+    public static final int     MAX_ATTEMPTS_DEFAULT
+        = 10;
+
+    public static final String  WINDOW_BASE_KEY
+        = PREFIX + "window.base";
+    public static final int     WINDOW_BASE_DEFAULT
+        = 3000;
+  }
+}
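
With the keys relocated, client code resolves them through the nested interface using the standard Configuration accessors; a minimal usage sketch (illustrative only, not part of the commit):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class RetryConfExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // resolves to "dfs.client.retry.policy.enabled", default false
    boolean retryEnabled = conf.getBoolean(
        HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
        HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT);
    // resolves to "dfs.client.retry.max.attempts", default 10
    int maxAttempts = conf.getInt(
        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_KEY,
        HdfsClientConfigKeys.Retry.MAX_ATTEMPTS_DEFAULT);
    System.out.println("retry enabled=" + retryEnabled
        + ", maxAttempts=" + maxAttempts);
  }
}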


hadoop git commit: HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to HdfsClientConfigKeys.Retry.

2015-04-07 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b76c321e9 -> eb3ca514f


HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to 
HdfsClientConfigKeys.Retry.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eb3ca514
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eb3ca514
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eb3ca514

Branch: refs/heads/branch-2
Commit: eb3ca514fb62706612c2bb9515b05e5061265613
Parents: b76c321
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Tue Apr 7 19:48:57 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Tue Apr 7 19:53:30 2015 -0700

--
 .../hdfs/client/HdfsClientConfigKeys.java   |  56 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  27 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 325 +++
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |  28 +-
 .../apache/hadoop/hdfs/client/HdfsUtils.java|   3 +-
 .../hadoop/hdfs/TestBlockMissingException.java  |   3 +-
 .../hadoop/hdfs/TestBlockReaderLocalLegacy.java |   3 +-
 .../hadoop/hdfs/TestClientReportBadBlock.java   |   3 +-
 .../apache/hadoop/hdfs/TestCrcCorruption.java   |  12 +-
 .../hadoop/hdfs/TestDFSClientRetries.java   |  11 +-
 .../org/apache/hadoop/hdfs/TestDFSShell.java|   5 +-
 .../hadoop/hdfs/TestEncryptedTransfer.java  |   4 +-
 .../hadoop/hdfs/TestMissingBlocksAlert.java |   4 +-
 .../java/org/apache/hadoop/hdfs/TestPread.java  |   5 +-
 .../datatransfer/sasl/TestSaslDataTransfer.java |  11 +-
 .../blockmanagement/TestBlockTokenWithDFS.java  |   3 +-
 .../hadoop/hdfs/server/namenode/TestFsck.java   |   9 +-
 .../namenode/TestListCorruptFileBlocks.java |   5 +-
 .../ha/TestFailoverWithBlockTokensEnabled.java  |   3 +-
 20 files changed, 309 insertions(+), 214 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eb3ca514/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
new file mode 100644
index 000..cf2d50a
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -0,0 +1,56 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.client;
+
+/** Client configuration properties */
+public interface HdfsClientConfigKeys {
+  static final String PREFIX = "dfs.client.";
+
+  /** Client retry configuration properties */
+  public interface Retry {
+    static final String PREFIX = HdfsClientConfigKeys.PREFIX + "retry.";
+
+    public static final String  POLICY_ENABLED_KEY
+        = PREFIX + "policy.enabled";
+    public static final boolean POLICY_ENABLED_DEFAULT
+        = false;
+    public static final String  POLICY_SPEC_KEY
+        = PREFIX + "policy.spec";
+    public static final String  POLICY_SPEC_DEFAULT
+        = "1,6,6,10"; //t1,n1,t2,n2,...
+
+    public static final String  TIMES_GET_LAST_BLOCK_LENGTH_KEY
+        = PREFIX + "times.get-last-block-length";
+    public static final int     TIMES_GET_LAST_BLOCK_LENGTH_DEFAULT
+        = 3;
+    public static final String  INTERVAL_GET_LAST_BLOCK_LENGTH_KEY
+        = PREFIX + "interval-ms.get-last-block-length";
+    public static final int     INTERVAL_GET_LAST_BLOCK_LENGTH_DEFAULT
+        = 4000;
+
+    public static final String  MAX_ATTEMPTS_KEY
+        = PREFIX + "max.attempts";
+    public static final int     MAX_ATTEMPTS_DEFAULT
+        = 10;
+
+    public static final String  WINDOW_BASE_KEY
+        = PREFIX + "window.base";
+    public static final int     WINDOW_BASE_DEFAULT
+        = 3000;
+  }
+}