hadoop git commit: MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs unnecessary null strings. Contributed by Akira AJISAKA.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5b27d9e07 -> 1035f550b


MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs unnecessary 
null strings. Contributed by Akira AJISAKA.

(cherry picked from commit 3ba18362f2a4b83635b89aa0adc5ebaf27d9ca83)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1035f550
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1035f550
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1035f550

Branch: refs/heads/branch-2
Commit: 1035f550b9019ed7fe6479dc37a4858aafa33d12
Parents: 5b27d9e
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 15:02:21 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 15:02:40 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt   |  3 +++
 .../hadoop/mapreduce/util/CountersStrings.java | 17 ++---
 .../org/apache/hadoop/mapred/TestCounters.java |  3 +++
 3 files changed, 8 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1035f550/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 963da70..84c5f85 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -107,6 +107,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6345. Documentation fix for when CRLA is enabled for MRAppMaster
 logs. (Rohit Agarwal via gera)
 
+MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs
+unnecessary null strings. (Akira AJISAKA via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1035f550/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
index ce799f5..ac16c12 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
@@ -151,25 +151,12 @@ public class CountersStrings {
   public static <C extends Counter, G extends CounterGroupBase<C>,
                  T extends AbstractCounters<C, G>>
   String toEscapedCompactString(T counters) {
-    String[] groupsArray;
-    int length = 0;
+    StringBuilder builder = new StringBuilder();
     synchronized(counters) {
-      groupsArray = new String[counters.countCounters()];
-      int i = 0;
-      // First up, obtain the escaped string for each group so that we can
-      // determine the buffer length apriori.
       for (G group : counters) {
-        String escapedString = toEscapedCompactString(group);
-        groupsArray[i++] = escapedString;
-        length += escapedString.length();
+        builder.append(toEscapedCompactString(group));
       }
     }
-
-    // Now construct the buffer
-    StringBuilder builder = new StringBuilder(length);
-    for (String group : groupsArray) {
-      builder.append(group);
-    }
     return builder.toString();
   }
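
For context: the removed two-pass version sized groupsArray with
counters.countCounters(), which counts individual counters, while the loop
fills only one slot per counter group; the leftover slots stay null, and
StringBuilder.append(String) renders a null argument as the literal text
"null". A minimal standalone sketch of that failure mode (illustrative names
only, not the Hadoop classes):

public class NullStringDemo {
  public static void main(String[] args) {
    int countCounters = 5;                     // e.g. 5 counters across 2 groups
    String[] groupsArray = new String[countCounters];
    groupsArray[0] = "{(group1)(Group 1)...}";
    groupsArray[1] = "{(group2)(Group 2)...}"; // slots 2..4 are never filled
    StringBuilder builder = new StringBuilder();
    for (String group : groupsArray) {
      builder.append(group);                   // a null String appends as "null"
    }
    System.out.println(builder);               // ...}nullnullnull
  }
}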
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1035f550/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
index 46e7221..5e2763e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.mapred;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -84,6 +85,8 @@ public class TestCounters {
*/
   private void 

hadoop git commit: MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs unnecessary null strings. Contributed by Akira AJISAKA.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk a319771d1 -> 3ba18362f


MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs unnecessary 
null strings. Contributed by Akira AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ba18362
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ba18362
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ba18362

Branch: refs/heads/trunk
Commit: 3ba18362f2a4b83635b89aa0adc5ebaf27d9ca83
Parents: a319771
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 15:02:21 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 15:02:21 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt   |  3 +++
 .../hadoop/mapreduce/util/CountersStrings.java | 17 ++---
 .../org/apache/hadoop/mapred/TestCounters.java |  3 +++
 3 files changed, 8 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ba18362/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 2d87444..062042c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -359,6 +359,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6345. Documentation fix for when CRLA is enabled for MRAppMaster
 logs. (Rohit Agarwal via gera)
 
+MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs
+unnecessary null strings. (Akira AJISAKA via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ba18362/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
index ce799f5..ac16c12 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
@@ -151,25 +151,12 @@ public class CountersStrings {
   public static <C extends Counter, G extends CounterGroupBase<C>,
                  T extends AbstractCounters<C, G>>
   String toEscapedCompactString(T counters) {
-    String[] groupsArray;
-    int length = 0;
+    StringBuilder builder = new StringBuilder();
     synchronized(counters) {
-      groupsArray = new String[counters.countCounters()];
-      int i = 0;
-      // First up, obtain the escaped string for each group so that we can
-      // determine the buffer length apriori.
       for (G group : counters) {
-        String escapedString = toEscapedCompactString(group);
-        groupsArray[i++] = escapedString;
-        length += escapedString.length();
+        builder.append(toEscapedCompactString(group));
       }
     }
-
-    // Now construct the buffer
-    StringBuilder builder = new StringBuilder(length);
-    for (String group : groupsArray) {
-      builder.append(group);
-    }
     return builder.toString();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ba18362/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
index 46e7221..5e2763e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.mapred;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -84,6 +85,8 @@ public class TestCounters {
*/
   private void testCounter(Counters counter) throws ParseException {
 String 

hadoop git commit: MAPREDUCE-6349. Fix typo in property org.apache.hadoop.mapreduce.lib.chain.Chain.REDUCER_INPUT_VALUE_CLASS. Contributed by Ray Chiang.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1035f550b -> 7e03bda3e


MAPREDUCE-6349. Fix typo in property 
org.apache.hadoop.mapreduce.lib.chain.Chain.REDUCER_INPUT_VALUE_CLASS. 
Contributed by Ray Chiang.

(cherry picked from commit bb6ef2984d8f117711b806c4ebdc757bd182c06e)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e03bda3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e03bda3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e03bda3

Branch: refs/heads/branch-2
Commit: 7e03bda3e7b07d45909b05eb6b4bfa2c48cdea66
Parents: 1035f55
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 15:36:33 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 15:36:53 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e03bda3/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 84c5f85..4ad39bc 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -110,6 +110,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs
 unnecessary null strings. (Akira AJISAKA via ozawa)
 
+MAPREDUCE-6349. Fix typo in property org.apache.hadoop.mapreduce.
+lib.chain.Chain.REDUCER_INPUT_VALUE_CLASS. (Ray Chiang via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e03bda3/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
index 1dad13e..803ece7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
@@ -68,7 +68,7 @@ public class Chain {
   protected static final String REDUCER_INPUT_KEY_CLASS =
     "mapreduce.chain.reducer.input.key.class";
   protected static final String REDUCER_INPUT_VALUE_CLASS =
-    "maperduce.chain.reducer.input.value.class";
+    "mapreduce.chain.reducer.input.value.class";
   protected static final String REDUCER_OUTPUT_KEY_CLASS =
     "mapreduce.chain.reducer.output.key.class";
   protected static final String REDUCER_OUTPUT_VALUE_CLASS =
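
For context: the constant's value doubles as the configuration key, so the
misspelled string meant the property could not be addressed by its intended
name. A hedged sketch of the mismatch using plain
org.apache.hadoop.conf.Configuration (demo class and value class are made up):

import org.apache.hadoop.conf.Configuration;

public class TypoKeyDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration(false); // skip default resources
    conf.set("mapreduce.chain.reducer.input.value.class", "org.example.MyWritable");
    // Anything reading the pre-fix misspelled key sees nothing:
    System.out.println(conf.get("maperduce.chain.reducer.input.value.class")); // null
    System.out.println(conf.get("mapreduce.chain.reducer.input.value.class")); // org.example.MyWritable
  }
}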



hadoop git commit: MAPREDUCE-6349. Fix typo in property org.apache.hadoop.mapreduce.lib.chain.Chain.REDUCER_INPUT_VALUE_CLASS. Contributed by Ray Chiang.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3ba18362f -> bb6ef2984


MAPREDUCE-6349. Fix typo in property 
org.apache.hadoop.mapreduce.lib.chain.Chain.REDUCER_INPUT_VALUE_CLASS. 
Contributed by Ray Chiang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb6ef298
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb6ef298
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb6ef298

Branch: refs/heads/trunk
Commit: bb6ef2984d8f117711b806c4ebdc757bd182c06e
Parents: 3ba1836
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 15:36:33 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 15:36:33 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt  | 3 +++
 .../main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java| 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb6ef298/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 062042c..19f95fc 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -362,6 +362,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs
 unnecessary null strings. (Akira AJISAKA via ozawa)
 
+MAPREDUCE-6349. Fix typo in property org.apache.hadoop.mapreduce.
+lib.chain.Chain.REDUCER_INPUT_VALUE_CLASS. (Ray Chiang via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb6ef298/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
index 1dad13e..803ece7 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/chain/Chain.java
@@ -68,7 +68,7 @@ public class Chain {
   protected static final String REDUCER_INPUT_KEY_CLASS =
     "mapreduce.chain.reducer.input.key.class";
   protected static final String REDUCER_INPUT_VALUE_CLASS =
-    "maperduce.chain.reducer.input.value.class";
+    "mapreduce.chain.reducer.input.value.class";
   protected static final String REDUCER_OUTPUT_KEY_CLASS =
     "mapreduce.chain.reducer.output.key.class";
   protected static final String REDUCER_OUTPUT_VALUE_CLASS =



hadoop git commit: HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when native code is not loaded. Contributed by Zhijie Shen.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk bb6ef2984 -> 76fa606e2


HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when 
native code is not loaded. Contributed by Zhijie Shen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76fa606e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76fa606e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76fa606e

Branch: refs/heads/trunk
Commit: 76fa606e2d3d04407f2f6b4ea276cce0f60db4be
Parents: bb6ef29
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 17:05:00 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 17:05:00 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java  | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76fa606e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ea3316a..bb8f900 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -616,6 +616,9 @@ Release 2.7.1 - UNRELEASED
 
 HADOOP-11891. OsSecureRandom should lazily fill its reservoir (asuresh)
 
+HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when
+native code is not loaded. (Zhijie Shen via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76fa606e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 8d2fa1a..2a9c5d0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -60,7 +60,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
    * Are the native snappy libraries loaded & initialized?
    */
   public static void checkNativeCodeLoaded() {
-      if (!NativeCodeLoader.buildSupportsSnappy()) {
+      if (!NativeCodeLoader.isNativeCodeLoaded() ||
+          !NativeCodeLoader.buildSupportsSnappy()) {
         throw new RuntimeException("native snappy library not available: " +
             "this version of libhadoop was built without " +
             "snappy support.");
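
For context: buildSupportsSnappy() is a native method, so calling it when
libhadoop was never loaded fails with UnsatisfiedLinkError instead of the
intended RuntimeException; checking isNativeCodeLoaded() first short-circuits
before any native call is made. A hedged sketch of the general guard pattern
(class and library names are illustrative, not Hadoop's):

public class NativeGuardDemo {
  private static final boolean LOADED = tryLoad();

  private static boolean tryLoad() {
    try {
      System.loadLibrary("mynative"); // may be absent on this machine
      return true;
    } catch (UnsatisfiedLinkError e) {
      return false;
    }
  }

  public static native boolean buildSupportsFeature(); // resolves only if loaded

  public static void checkFeature() {
    // The || short-circuit keeps the native call from ever running unloaded.
    if (!LOADED || !buildSupportsFeature()) {
      throw new RuntimeException("feature not available in this build");
    }
  }
}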



hadoop git commit: HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when native code is not loaded. Contributed by Zhijie Shen.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 7e03bda3e -> 1e1ebbb42


HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when 
native code is not loaded. Contributed by Zhijie Shen.

(cherry picked from commit 76fa606e2d3d04407f2f6b4ea276cce0f60db4be)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1e1ebbb4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1e1ebbb4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1e1ebbb4

Branch: refs/heads/branch-2
Commit: 1e1ebbb42bcea22901fd8277c190d2852147d10d
Parents: 7e03bda
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 17:05:00 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 17:05:53 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java  | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e1ebbb4/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 7bac3c9..0c55598 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -164,6 +164,9 @@ Release 2.7.1 - UNRELEASED
 
 HADOOP-11891. OsSecureRandom should lazily fill its reservoir (asuresh)
 
+HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when
+native code is not loaded. (Zhijie Shen via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1e1ebbb4/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 8d2fa1a..2a9c5d0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -60,7 +60,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
    * Are the native snappy libraries loaded & initialized?
    */
   public static void checkNativeCodeLoaded() {
-      if (!NativeCodeLoader.buildSupportsSnappy()) {
+      if (!NativeCodeLoader.isNativeCodeLoaded() ||
+          !NativeCodeLoader.buildSupportsSnappy()) {
         throw new RuntimeException("native snappy library not available: " +
             "this version of libhadoop was built without " +
             "snappy support.");



hadoop git commit: HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when native code is not loaded. Contributed by Zhijie Shen.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 3389bac42 -> 919cbc19e


HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when 
native code is not loaded. Contributed by Zhijie Shen.

(cherry picked from commit 76fa606e2d3d04407f2f6b4ea276cce0f60db4be)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/919cbc19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/919cbc19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/919cbc19

Branch: refs/heads/branch-2.7
Commit: 919cbc19e94482f885c244dfd16acf1132560bcf
Parents: 3389bac
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 17:05:00 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 17:06:08 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java  | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/919cbc19/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 49168c7..b34adec 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -26,6 +26,9 @@ Release 2.7.1 - UNRELEASED
 
 HADOOP-11891. OsSecureRandom should lazily fill its reservoir (asuresh)
 
+HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when
+native code is not loaded. (Zhijie Shen via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/919cbc19/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 8d2fa1a..2a9c5d0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -60,7 +60,8 @@ public class SnappyCodec implements Configurable, CompressionCodec, DirectDecomp
    * Are the native snappy libraries loaded & initialized?
    */
   public static void checkNativeCodeLoaded() {
-      if (!NativeCodeLoader.buildSupportsSnappy()) {
+      if (!NativeCodeLoader.isNativeCodeLoaded() ||
+          !NativeCodeLoader.buildSupportsSnappy()) {
         throw new RuntimeException("native snappy library not available: " +
             "this version of libhadoop was built without " +
             "snappy support.");



hadoop git commit: HADOOP-11328. ZKFailoverController does not log Exception when doRun raises errors. Contributed by Tianyin Xu.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 76fa606e2 -> bb9ddef0e


HADOOP-11328. ZKFailoverController does not log Exception when doRun raises 
errors. Contributed by Tianyin Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bb9ddef0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bb9ddef0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bb9ddef0

Branch: refs/heads/trunk
Commit: bb9ddef0e7603b60d25250bb53a7ae9f147cd3cd
Parents: 76fa606
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 17:47:37 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 17:47:37 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb9ddef0/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index bb8f900..2bf790a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -525,6 +525,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11357. Print information of the build enviornment in test-patch.sh
 (aw)
 
+HADOOP-11328. ZKFailoverController does not log Exception when doRun raises
+errors. (Tianyin Xu via ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bb9ddef0/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index f58c3f4..9eb1ff8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -180,6 +180,7 @@ public abstract class ZKFailoverController {
         }
       });
     } catch (RuntimeException rte) {
+      LOG.fatal("The failover controller encounters runtime error: " + rte);
       throw (Exception)rte.getCause();
     }
   }
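
For context: the catch block rethrows rte.getCause(), which drops the
RuntimeException's own message and stack trace (and getCause() can legitimately
be null), so logging the wrapper first preserves the diagnostic. A small
self-contained illustration of what would otherwise vanish:

public class UnwrapDemo {
  public static void main(String[] args) {
    try {
      throw new RuntimeException("wrapper detail that the rethrow discards");
    } catch (RuntimeException rte) {
      System.err.println("log first: " + rte); // keeps the wrapper's message
      Throwable cause = rte.getCause();        // null here: nothing useful to rethrow
      System.err.println("unwrapped: " + cause);
    }
  }
}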



hadoop git commit: HADOOP-11328. ZKFailoverController does not log Exception when doRun raises errors. Contributed by Tianyin Xu.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 1e1ebbb42 -> c64309f6a


HADOOP-11328. ZKFailoverController does not log Exception when doRun raises 
errors. Contributed by Tianyin Xu.

(cherry picked from commit bb9ddef0e7603b60d25250bb53a7ae9f147cd3cd)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c64309f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c64309f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c64309f6

Branch: refs/heads/branch-2
Commit: c64309f6ab6ba282420b11078941ca5512a0eff9
Parents: 1e1ebbb
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 17:47:37 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 17:47:52 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/ha/ZKFailoverController.java  | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c64309f6/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0c55598..945c9db 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -67,6 +67,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11357. Print information of the build enviornment in test-patch.sh
 (aw)
 
+HADOOP-11328. ZKFailoverController does not log Exception when doRun raises
+errors. (Tianyin Xu via ozawa)
+
   OPTIMIZATIONS
 
 HADOOP-11785. Reduce the number of listStatus operation in distcp

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c64309f6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
index f58c3f4..9eb1ff8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
@@ -180,6 +180,7 @@ public abstract class ZKFailoverController {
         }
       });
     } catch (RuntimeException rte) {
+      LOG.fatal("The failover controller encounters runtime error: " + rte);
       throw (Exception)rte.getCause();
     }
   }



hadoop git commit: YARN-3097. Logging of resource recovery on NM restart has redundancies. Contributed by Eric Payne

2015-05-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk bb9ddef0e -> 8f65c793f


YARN-3097. Logging of resource recovery on NM restart has redundancies. 
Contributed by Eric Payne


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f65c793
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f65c793
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f65c793

Branch: refs/heads/trunk
Commit: 8f65c793f2930bfd16885a2ab188a9970b754974
Parents: bb9ddef
Author: Jason Lowe jl...@apache.org
Authored: Mon May 4 15:31:15 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon May 4 15:31:15 2015 +

--
 hadoop-yarn-project/CHANGES.txt| 2 ++
 .../localizer/ResourceLocalizationService.java | 6 --
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f65c793/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 684efc5..08762e3 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -296,6 +296,8 @@ Release 2.8.0 - UNRELEASED
 YARN-1993. Cross-site scripting vulnerability in TextView.java. (Kenji 
Kikushima
 via ozawa)
 
+YARN-3097. Logging of resource recovery on NM restart has redundancies
+(Eric Payne via jlowe)
 
 Release 2.7.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f65c793/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index cdd252c..e9c45f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -303,8 +303,10 @@ public class ResourceLocalizationService extends CompositeService
     for (LocalizedResourceProto proto : state.getLocalizedResources()) {
       LocalResource rsrc = new LocalResourcePBImpl(proto.getResource());
       LocalResourceRequest req = new LocalResourceRequest(rsrc);
-      LOG.info("Recovering localized resource " + req + " at "
-          + proto.getLocalPath());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Recovering localized resource " + req + " at "
+            + proto.getLocalPath());
+      }
       tracker.handle(new ResourceRecoveredEvent(req,
           new Path(proto.getLocalPath()), proto.getSize()));
     }
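
For context: besides demoting the per-resource line from INFO to DEBUG, the
isDebugEnabled() guard matters because commons-logging assembles the message
eagerly; without it, req.toString() and the concatenation run for every
recovered resource even when DEBUG is off. The shape of the idiom (demo class;
req and path stand in for the real fields):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class GuardedDebugDemo {
  private static final Log LOG = LogFactory.getLog(GuardedDebugDemo.class);

  static void recover(Object req, String path) {
    // Concatenation, and req.toString(), happen only when DEBUG is enabled.
    if (LOG.isDebugEnabled()) {
      LOG.debug("Recovering localized resource " + req + " at " + path);
    }
  }
}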



hadoop git commit: YARN-3097. Logging of resource recovery on NM restart has redundancies. Contributed by Eric Payne (cherry picked from commit 8f65c793f2930bfd16885a2ab188a9970b754974)

2015-05-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c64309f6a -> 99b49a818


YARN-3097. Logging of resource recovery on NM restart has redundancies. 
Contributed by Eric Payne
(cherry picked from commit 8f65c793f2930bfd16885a2ab188a9970b754974)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99b49a81
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99b49a81
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99b49a81

Branch: refs/heads/branch-2
Commit: 99b49a818154c7ed4bec5762816c01d4da3596b9
Parents: c64309f
Author: Jason Lowe jl...@apache.org
Authored: Mon May 4 15:31:15 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon May 4 10:32:33 2015 -0500

--
 hadoop-yarn-project/CHANGES.txt| 2 ++
 .../localizer/ResourceLocalizationService.java | 6 --
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b49a81/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ec4c6a5..cceb578 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -248,6 +248,8 @@ Release 2.8.0 - UNRELEASED
 YARN-1993. Cross-site scripting vulnerability in TextView.java. (Kenji 
Kikushima
 via ozawa)
 
+YARN-3097. Logging of resource recovery on NM restart has redundancies
+(Eric Payne via jlowe)
 
 Release 2.7.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/99b49a81/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
index cdd252c..e9c45f3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java
@@ -303,8 +303,10 @@ public class ResourceLocalizationService extends CompositeService
     for (LocalizedResourceProto proto : state.getLocalizedResources()) {
       LocalResource rsrc = new LocalResourcePBImpl(proto.getResource());
       LocalResourceRequest req = new LocalResourceRequest(rsrc);
-      LOG.info("Recovering localized resource " + req + " at "
-          + proto.getLocalPath());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Recovering localized resource " + req + " at "
+            + proto.getLocalPath());
+      }
       tracker.handle(new ResourceRecoveredEvent(req,
           new Path(proto.getLocalPath()), proto.getSize()));
     }



[13/50] hadoop git commit: HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R

2015-05-04 Thread zhz
HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76dcb47e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76dcb47e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76dcb47e

Branch: refs/heads/HDFS-7285
Commit: 76dcb47e2ab88ed48a38ecb15a43dc23d825313a
Parents: 515deb9
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 04:31:48 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:22 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt   |  2 ++
 .../hadoop/io/erasurecode/SchemaLoader.java  | 12 ++--
 .../io/erasurecode/coder/RSErasureDecoder.java   | 19 ++-
 .../io/erasurecode/coder/RSErasureEncoder.java   | 19 ++-
 .../io/erasurecode/coder/XORErasureDecoder.java  |  2 +-
 .../io/erasurecode/rawcoder/util/RSUtil.java | 17 +
 6 files changed, 62 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76dcb47e/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c72394e..b850e11 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -40,3 +40,5 @@
 
 HADOOP-11645. Erasure Codec API covering the essential aspects for an 
erasure code
 ( Kai Zheng via vinayakumarb )
+  
+HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via 
Kai Zheng)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76dcb47e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
index c51ed37..75dd03a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.w3c.dom.*;
@@ -36,7 +36,7 @@ import java.util.*;
  * A EC schema loading utility that loads predefined EC schemas from XML file
  */
 public class SchemaLoader {
-  private static final Log LOG = LogFactory.getLog(SchemaLoader.class.getName());
+  private static final Logger LOG = LoggerFactory.getLogger(SchemaLoader.class.getName());
 
   /**
* Load predefined ec schemas from configuration file. This file is
@@ -63,7 +63,7 @@ public class SchemaLoader {
   private List<ECSchema> loadSchema(File schemaFile)
       throws ParserConfigurationException, IOException, SAXException {
 
-    LOG.info("Loading predefined EC schema file " + schemaFile);
+    LOG.info("Loading predefined EC schema file {}", schemaFile);
 
 // Read and parse the schema file.
 DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
@@ -87,7 +87,7 @@ public class SchemaLoader {
         ECSchema schema = loadSchema(element);
         schemas.add(schema);
       } else {
-        LOG.warn("Bad element in EC schema configuration file: " +
+        LOG.warn("Bad element in EC schema configuration file: {}",
             element.getTagName());
       }
   }
@@ -109,7 +109,7 @@ public class SchemaLoader {
   URL url = Thread.currentThread().getContextClassLoader()
   .getResource(schemaFilePath);
   if (url == null) {
-        LOG.warn(schemaFilePath + " not found on the classpath.");
+        LOG.warn("{} not found on the classpath.", schemaFilePath);
         schemaFile = null;
       } else if (! url.getProtocol().equalsIgnoreCase("file")) {
         throw new RuntimeException(
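
For context: the switch from commons-logging to SLF4J lets the {} placeholder
defer message formatting until the level check passes, which is why the
explicit concatenations above could be dropped without adding guards. A hedged
self-contained sketch of the idiom (demo class name is made up):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class Slf4jDemo {
  private static final Logger LOG = LoggerFactory.getLogger(Slf4jDemo.class);

  public static void main(String[] args) {
    String schemaFilePath = "conf/ecschema-def.xml";
    // Formatted lazily: no string-building work is done if WARN is disabled.
    LOG.warn("{} not found on the classpath.", schemaFilePath);
  }
}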

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76dcb47e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 

[02/50] hadoop git commit: HADOOP-11645. Erasure Codec API covering the essential aspects for an erasure code ( Contributed by Kai Zheng)

2015-05-04 Thread zhz
HADOOP-11645. Erasure Codec API covering the essential aspects for an erasure 
code ( Contributed by Kai Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c284ac0a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c284ac0a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c284ac0a

Branch: refs/heads/HDFS-7285
Commit: c284ac0a7f2ac3c2e38f7470694b1d4d2109c1b1
Parents: 8f01376
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 16:05:22 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:11:32 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +
 .../hadoop/io/erasurecode/ECBlockGroup.java | 18 
 .../erasurecode/codec/AbstractErasureCodec.java | 88 +++
 .../io/erasurecode/codec/ErasureCodec.java  | 56 
 .../io/erasurecode/codec/RSErasureCodec.java| 38 +
 .../io/erasurecode/codec/XORErasureCodec.java   | 45 ++
 .../erasurecode/coder/AbstractErasureCoder.java |  7 ++
 .../io/erasurecode/coder/ErasureCoder.java  |  7 ++
 .../io/erasurecode/grouper/BlockGrouper.java| 90 
 9 files changed, 352 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c284ac0a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 7716728..c72394e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -37,3 +37,6 @@
 
 HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
 ( Kai Zheng )
+
+HADOOP-11645. Erasure Codec API covering the essential aspects for an 
erasure code
+( Kai Zheng via vinayakumarb )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c284ac0a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
index 2c851a5..0a86907 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
@@ -79,4 +79,22 @@ public class ECBlockGroup {
 return false;
   }
 
+  /**
+   * Get erased blocks count
+   * @return
+   */
+  public int getErasedCount() {
+int erasedCount = 0;
+
+for (ECBlock dataBlock : dataBlocks) {
+  if (dataBlock.isErased()) erasedCount++;
+}
+
+for (ECBlock parityBlock : parityBlocks) {
+  if (parityBlock.isErased()) erasedCount++;
+}
+
+return erasedCount;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c284ac0a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
new file mode 100644
index 000..9993786
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.codec;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.coder.*;
+import 

[11/50] hadoop git commit: HDFS-8104 Make hard-coded values consistent with the system default schema first before remove them. Contributed by Kai Zheng

2015-05-04 Thread zhz
HDFS-8104 Make hard-coded values consistent with the system default schema 
first before remove them. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/515deb9e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/515deb9e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/515deb9e

Branch: refs/heads/HDFS-7285
Commit: 515deb9e72f670d05bf69da7f96ab715e1783d02
Parents: 3e4c3dd
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 00:16:28 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:17 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../hadoop/hdfs/TestPlanReadPortions.java   | 142 +++
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 112 ---
 3 files changed, 145 insertions(+), 113 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/515deb9e/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5078a15..1e695c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -54,4 +54,6 @@
 HDFS-8023. Erasure Coding: retrieve eraure coding schema for a file from
 NameNode (vinayakumarb)
 
-HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
\ No newline at end of file
+HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
+
+HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/515deb9e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
new file mode 100644
index 000..cf84b30
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.junit.Test;
+
+import static org.apache.hadoop.hdfs.DFSStripedInputStream.ReadPortion;
+import static org.junit.Assert.*;
+
+public class TestPlanReadPortions {
+
+  // We only support this as num of data blocks. It might be good enough for now
+  // for the purpose, even not flexible yet for any number in a schema.
+  private final short GROUP_SIZE = 3;
+  private final int CELLSIZE = 128 * 1024;
+
+  private void testPlanReadPortions(int startInBlk, int length,
+  int bufferOffset, int[] readLengths, int[] offsetsInBlock,
+  int[][] bufferOffsets, int[][] bufferLengths) {
+ReadPortion[] results = DFSStripedInputStream.planReadPortions(GROUP_SIZE,
+CELLSIZE, startInBlk, length, bufferOffset);
+assertEquals(GROUP_SIZE, results.length);
+
+    for (int i = 0; i < GROUP_SIZE; i++) {
+  assertEquals(readLengths[i], results[i].getReadLength());
+  assertEquals(offsetsInBlock[i], results[i].getStartOffsetInBlock());
+  final int[] bOffsets = results[i].getOffsets();
+  assertArrayEquals(bufferOffsets[i], bOffsets);
+  final int[] bLengths = results[i].getLengths();
+  assertArrayEquals(bufferLengths[i], bLengths);
+}
+  }
+
+  /**
+   * Test {@link DFSStripedInputStream#planReadPortions}
+   */
+  @Test
+  public void testPlanReadPortions() {
+/**
+ * start block offset is 0, read cellSize - 10
+ */
+testPlanReadPortions(0, CELLSIZE - 10, 0,
+new int[]{CELLSIZE - 10, 0, 0}, new int[]{0, 0, 0},
+new int[][]{new int[]{0}, new int[]{}, new int[]{}},
+  

[09/50] hadoop git commit: HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng

2015-05-04 Thread zhz
HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e4c3dd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e4c3dd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e4c3dd6

Branch: refs/heads/HDFS-7285
Commit: 3e4c3dd6e25a7590119f2884aed024042402fd8c
Parents: 736202e
Author: Kai Zheng kai.zh...@intel.com
Authored: Thu Apr 9 01:30:02 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:07 2015 -0700

--
 .../src/main/conf/ecschema-def.xml  |  5 --
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 57 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  4 +-
 .../hdfs/server/namenode/ECSchemaManager.java   | 62 
 4 files changed, 120 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e4c3dd6/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
index e619485..e36d386 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
@@ -27,11 +27,6 @@ You can modify and remove those not used yet, or add new ones.
 -->
 
 <schemas>
-  <schema name="RS-6-3">
-    <k>6</k>
-    <m>3</m>
-    <codec>RS</codec>
-  </schema>
   <schema name="RS-10-4">
     <k>10</k>
     <m>4</m>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e4c3dd6/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 27be00e..8c3310e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -23,12 +23,12 @@ import java.util.Map;
 /**
  * Erasure coding schema to housekeeper relevant information.
  */
-public class ECSchema {
+public final class ECSchema {
   public static final String NUM_DATA_UNITS_KEY = "k";
   public static final String NUM_PARITY_UNITS_KEY = "m";
   public static final String CODEC_NAME_KEY = "codec";
   public static final String CHUNK_SIZE_KEY = "chunkSize";
-  public static final int DEFAULT_CHUNK_SIZE = 64 * 1024; // 64K
+  public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
   private String schemaName;
   private String codecName;
@@ -82,6 +82,18 @@ public class ECSchema {
   }
 
   /**
+   * Constructor with key parameters provided.
+   * @param schemaName
+   * @param codecName
+   * @param numDataUnits
+   * @param numParityUnits
+   */
+  public ECSchema(String schemaName, String codecName,
+  int numDataUnits, int numParityUnits) {
+this(schemaName, codecName, numDataUnits, numParityUnits, null);
+  }
+
+  /**
* Constructor with key parameters provided. Note the options may contain
* additional information for the erasure codec to interpret further.
* @param schemaName
@@ -200,4 +212,45 @@ public class ECSchema {
 
 return sb.toString();
   }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+
+ECSchema ecSchema = (ECSchema) o;
+
+if (numDataUnits != ecSchema.numDataUnits) {
+  return false;
+}
+if (numParityUnits != ecSchema.numParityUnits) {
+  return false;
+}
+if (chunkSize != ecSchema.chunkSize) {
+  return false;
+}
+if (!schemaName.equals(ecSchema.schemaName)) {
+  return false;
+}
+if (!codecName.equals(ecSchema.codecName)) {
+  return false;
+}
+return options.equals(ecSchema.options);
+  }
+
+  @Override
+  public int hashCode() {
+int result = schemaName.hashCode();
+result = 31 * result + codecName.hashCode();
+result = 31 * result + options.hashCode();
+result = 31 * result + numDataUnits;
+result = 31 * result + numParityUnits;
+result = 31 * result + chunkSize;
+
+return result;
+  }
 }
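
For context: equals and hashCode cover the same six fields, so two schemas
built identically compare equal and hash alike, letting ECSchema serve as a
map key or set element. A hedged usage sketch using the four-argument
constructor added above (assumes the delegated five-argument constructor
treats the null options map uniformly for both instances):

ECSchema a = new ECSchema("RS-6-3", "RS", 6, 3);
ECSchema b = new ECSchema("RS-6-3", "RS", 6, 3);
System.out.println(a.equals(b));                  // expected: true
System.out.println(a.hashCode() == b.hashCode()); // must hold whenever equals is true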

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e4c3dd6/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 

[14/50] hadoop git commit: HDFS-8122. Erasure Coding: Support specifying ECSchema during creation of ECZone. Contributed by Vinayakumar B.

2015-05-04 Thread zhz
HDFS-8122. Erasure Coding: Support specifying ECSchema during creation of 
ECZone. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c743e892
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c743e892
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c743e892

Branch: refs/heads/HDFS-7285
Commit: c743e89286aeec2f815a97ed87a62a71e7ec9424
Parents: 6c9a57b
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 11:08:57 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:23 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  6 ++--
 .../hadoop/hdfs/DistributedFileSystem.java  | 33 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  6 ++--
 ...tNamenodeProtocolServerSideTranslatorPB.java |  4 ++-
 .../ClientNamenodeProtocolTranslatorPB.java |  5 ++-
 .../namenode/ErasureCodingZoneManager.java  | 30 +-
 .../hdfs/server/namenode/FSDirectory.java   | 22 -
 .../hdfs/server/namenode/FSNamesystem.java  | 19 ++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  6 ++--
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  2 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  2 +-
 .../hadoop/hdfs/TestErasureCodingZones.java | 18 +--
 .../server/namenode/TestAddStripedBlocks.java   |  2 +-
 .../server/namenode/TestFSEditLogLoader.java|  4 +--
 .../hdfs/server/namenode/TestFSImage.java   |  4 +--
 16 files changed, 112 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c743e892/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 197e664..2845223 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1321,7 +1321,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
  Progressable progress,
  int buffersize,
  ChecksumOpt checksumOpt) throws IOException {
-return create(src, permission, flag, createParent, replication, blockSize, 
+return create(src, permission, flag, createParent, replication, blockSize,
 progress, buffersize, checksumOpt, null);
   }
 
@@ -2972,12 +2972,12 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
-  public void createErasureCodingZone(String src)
+  public void createErasureCodingZone(String src, ECSchema schema)
   throws IOException {
 checkOpen();
 TraceScope scope = getPathTraceScope("createErasureCodingZone", src);
 try {
-  namenode.createErasureCodingZone(src);
+  namenode.createErasureCodingZone(src, schema);
 } catch (RemoteException re) {
   throw re.unwrapRemoteException(AccessControlException.class,
   SafeModeException.class,
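
For illustration, a hedged client-side sketch of the widened call; the helper name is hypothetical, and the idea that a null schema falls back to the system-wide default (HDFS-8074) is an assumption, not something this diff states:

  import java.io.IOException;
  import org.apache.hadoop.hdfs.DFSClient;
  import org.apache.hadoop.io.erasurecode.ECSchema;

  public class CreateEcZoneSketch {
    // Assumption: a null schema selects the system default (cf. HDFS-8074).
    static void markAsEcZone(DFSClient client, String dir, ECSchema schema)
        throws IOException {
      client.createErasureCodingZone(dir, schema); // two-arg form from this patch
    }
  }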

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c743e892/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 4ca6d57..4c3e0a5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -86,6 +86,7 @@ import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
@@ -2264,4 +2265,36 @@ public class DistributedFileSystem extends FileSystem {
   throws IOException {
 return dfs.getInotifyEventStream(lastReadTxid);
   }
+
+  /**
+   * Create the erasurecoding zone
+   * 
+   * @param path Directory to create the ec zone
+   * @param 

[07/50] hadoop git commit: HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by Zhe Zhang and Jing Zhao

2015-05-04 Thread zhz
HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by 
Zhe Zhang and Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db6c56ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db6c56ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db6c56ef

Branch: refs/heads/HDFS-7285
Commit: db6c56ef7031d4eea1d1e656ba0a88218ece3097
Parents: fb38650
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 7 11:20:13 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:11:46 2015 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db6c56ef/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 5ad043b..cc6a7a1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3167,7 +3167,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   LOG.debug("Using hedged reads; pool threads=" + num);
 }
   }
-  
+
   /**
* Create thread pool for parallel reading in striped layout,
* STRIPED_READ_THREAD_POOL, if it does not already exist.



[18/50] hadoop git commit: HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits (Vinayakumar B)

2015-05-04 Thread zhz
HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits 
(Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/012e194c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/012e194c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/012e194c

Branch: refs/heads/HDFS-7285
Commit: 012e194c6ccd1675050ff5d0df75f651e12bb510
Parents: 2d58f36
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 12:23:07 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:24 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 15 +++
 1 file changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/012e194c/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 07bbd4a..9fdac98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -51,11 +51,20 @@
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
 manage EC zones (Zhe Zhang)
 
+HDFS-7969. Erasure coding: NameNode support for lease recovery of striped
+block groups. (Zhe Zhang)
+
+HDFS-7782. Erasure coding: pread from files in striped layout.
+(Zhe Zhang and Jing Zhao via Zhe Zhang)
+
 HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
 NameNode (vinayakumarb)
 
 HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
 
+HDFS-8077. Erasure coding: fix bugs in EC zone and symlinks.
+(Jing Zhao and Zhe Zhang via Jing Zhao)
+
 HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
 
 HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
@@ -63,5 +72,11 @@
 HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
 ECSchemas loaded in Namenode. (vinayakumarb)
 
+HDFS-8122. Erasure Coding: Support specifying ECSchema during creation of 
ECZone.
+(Vinayakumar B via Zhe Zhang)
+
+HDFS-8114. Erasure coding: Add auditlog 
FSNamesystem#createErasureCodingZone if this
+operation fails. (Rakesh R via Zhe Zhang)
+
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
 separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file



[08/50] hadoop git commit: HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from NameNode (Contributed by Vinayakumar B)

2015-05-04 Thread zhz
HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from 
NameNode (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/48f975a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/48f975a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/48f975a2

Branch: refs/heads/HDFS-7285
Commit: 48f975a2f8841b7e8948361aa5153e4b2975c97c
Parents: db6c56e
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 12:48:59 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:01 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 ++-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 14 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java| 10 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 19 
 .../ClientNamenodeProtocolTranslatorPB.java | 18 
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 46 
 .../hdfs/server/namenode/FSNamesystem.java  | 31 +
 .../hdfs/server/namenode/NameNodeRpcServer.java |  7 +++
 .../src/main/proto/ClientNamenodeProtocol.proto | 10 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   | 28 
 .../hadoop/hdfs/TestErasureCodingZones.java | 38 +++-
 11 files changed, 223 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/48f975a2/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9927ccf..7423033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -49,4 +49,7 @@
 (Hui Zheng via Zhe Zhang)
 
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
-manage EC zones (Zhe Zhang)
\ No newline at end of file
+manage EC zones (Zhe Zhang)
+
+HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from
+NameNode (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48f975a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index cc6a7a1..8415ac8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -118,6 +118,7 @@ import 
org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -3097,6 +3098,19 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  public ECInfo getErasureCodingInfo(String src) throws IOException {
+checkOpen();
+TraceScope scope = getPathTraceScope("getErasureCodingInfo", src);
+try {
+  return namenode.getErasureCodingInfo(src);
+} catch (RemoteException re) {
+  throw re.unwrapRemoteException(AccessControlException.class,
+  FileNotFoundException.class, UnresolvedPathException.class);
+} finally {
+  scope.close();
+}
+  }
+
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException 
{
 return new DFSInotifyEventInputStream(traceSampler, namenode);
   }
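
A short usage sketch of the new client call; treating a null result as "no EC zone" and the helper name are assumptions made for illustration:

  import java.io.IOException;
  import org.apache.hadoop.hdfs.DFSClient;
  import org.apache.hadoop.hdfs.protocol.ECInfo;

  public class GetEcInfoSketch {
    // Returns the schema's chunk size, or -1 when no EC info is reported.
    static int chunkSizeOf(DFSClient client, String src) throws IOException {
      ECInfo info = client.getErasureCodingInfo(src);
      return info == null ? -1 : info.getSchema().getChunkSize();
    }
  }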

http://git-wip-us.apache.org/repos/asf/hadoop/blob/48f975a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index a94caad..d27dac7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1464,4 +1464,14 @@ public interface ClientProtocol {
*/
   @Idempotent
   public EventBatchList getEditsFromTxid(long txid) 

[19/50] hadoop git commit: HDFS-8123. Erasure Coding: Better to move EC related proto messages to a separate erasurecoding proto file (Contributed by Rakesh R)

2015-05-04 Thread zhz
HDFS-8123. Erasure Coding: Better to move EC related proto messages to a 
separate erasurecoding proto file (Contributed by Rakesh R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d58f36e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d58f36e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d58f36e

Branch: refs/heads/HDFS-7285
Commit: 2d58f36e0b4ede36dc6b70300bdc69474df89c6d
Parents: f5e5022
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 12:09:16 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:24 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 12 ++--
 .../ClientNamenodeProtocolTranslatorPB.java | 13 ++--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  6 +-
 .../namenode/ErasureCodingZoneManager.java  |  2 +-
 .../src/main/proto/ClientNamenodeProtocol.proto | 24 +--
 .../src/main/proto/erasurecoding.proto  | 74 
 .../hadoop-hdfs/src/main/proto/hdfs.proto   | 27 ---
 9 files changed, 96 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d58f36e/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5250dfa..07bbd4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -61,4 +61,7 @@
 HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
 
 HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
-ECSchemas loaded in Namenode. (vinayakumarb)
\ No newline at end of file
+ECSchemas loaded in Namenode. (vinayakumarb)
+
+HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
+separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d58f36e/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index c11b963..a13a2bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -343,6 +343,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;
   <include>hdfs.proto</include>
   <include>encryption.proto</include>
   <include>inotify.proto</include>
+  <include>erasurecoding.proto</include>
 </includes>
   </source>
   <output>${project.build.directory}/generated-sources/java</output>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d58f36e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index d103cf0..17141c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -107,12 +107,8 @@ import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetErasureCodingInfoRequestProto;
-import 

[04/50] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-05-04 Thread zhz
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d37f00e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d37f00e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d37f00e2

Branch: refs/heads/HDFS-7285
Commit: d37f00e2a3d894a4934d999e3718664630c84bdf
Parents: debec6f
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Apr 8 01:31:46 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:11:32 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d37f00e2/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 01280db..68d1d32 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -32,3 +32,6 @@
 
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
 ( Xinwei Qin via Kai Zheng )
+
+HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
+( Kai Zheng )



[24/50] hadoop git commit: HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the block is a striped block. Contributed by Hui Zheng.

2015-05-04 Thread zhz
HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the block 
is a striped block. Contributed by Hui Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d8252893
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d8252893
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d8252893

Branch: refs/heads/HDFS-7285
Commit: d8252893b7a60e58d324620181716d629ea4dfed
Parents: 4c3cd82
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 17 12:05:31 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:25 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  |  2 ++
 .../hdfs/server/blockmanagement/BlockManager.java | 18 --
 2 files changed, 6 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8252893/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 78ca6d3..0ed61cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -85,3 +85,5 @@
 
 HDFS-7994. Detect if resevered EC Block ID is already used during namenode
 startup. (Hui Zheng via szetszwo)
+
+HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d8252893/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4c3a007..01422db 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2925,15 +2925,6 @@ public class BlockManager {
   }
 
   /**
-   * Set the value of whether there are any non-EC blocks using StripedID.
-   *
-   * @param has - the value of whether there are any non-EC blocks using 
StripedID.
-   */
-  public void hasNonEcBlockUsingStripedID(boolean has){
-hasNonEcBlockUsingStripedID = has;
-  }
-
-  /**
* Process a single possibly misreplicated block. This adds it to the
* appropriate queues if necessary, and returns a result code indicating
* what happened with it.
@@ -3529,7 +3520,7 @@ public class BlockManager {
 if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
   info = blocksMap.getStoredBlock(
   new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
-  if ((info == null) && hasNonEcBlockUsingStripedID()){
+  if ((info == null) && hasNonEcBlockUsingStripedID){
 info = blocksMap.getStoredBlock(block);
   }
 } else {
@@ -3713,10 +3704,9 @@ public class BlockManager {
*/
   public BlockInfo addBlockCollectionWithCheck(
   BlockInfo block, BlockCollection bc) {
-if (!hasNonEcBlockUsingStripedID()){
-  if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
-hasNonEcBlockUsingStripedID(true);
-  }
+if (!hasNonEcBlockUsingStripedID && !block.isStriped() &&
+BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  hasNonEcBlockUsingStripedID = true;
 }
 return addBlockCollection(block, bc);
   }
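
The check hinges on striped block group IDs being drawn from a reserved ID range that a legacy, randomly generated block ID may also fall into. A minimal sketch of the range test, under the assumption that the reserved range is the negative half of the long ID space (the real test lives in BlockIdManager):

  public class StripedIdRangeSketch {
    // Assumption: striped IDs have the sign bit set, so a contiguous block
    // created before striping existed can collide with the striped range.
    static boolean isStripedBlockID(long blockId) {
      return blockId < 0;
    }
  }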



[23/50] hadoop git commit: HDFS-7349. Support DFS command for the EC encoding (Contributed by Vinayakumar B)

2015-05-04 Thread zhz
HDFS-7349. Support DFS command for the EC encoding (Contributed by Vinayakumar 
B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a969f869
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a969f869
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a969f869

Branch: refs/heads/HDFS-7285
Commit: a969f869619164e7e3f16e5b64ef182f104a
Parents: 012e194
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 16:38:22 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:25 2015 -0700

--
 .../main/java/org/apache/hadoop/fs/FsShell.java |   8 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |   5 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  18 ++
 .../hadoop/hdfs/DistributedFileSystem.java  |  32 +++
 .../hadoop/hdfs/protocol/ClientProtocol.java|   9 +
 .../apache/hadoop/hdfs/protocol/ECZoneInfo.java |  56 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  18 ++
 .../ClientNamenodeProtocolTranslatorPB.java |  19 ++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  12 ++
 .../namenode/ErasureCodingZoneManager.java  |  11 +-
 .../hdfs/server/namenode/FSDirectory.java   |  10 +
 .../hdfs/server/namenode/FSNamesystem.java  |  24 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +
 .../hadoop/hdfs/tools/erasurecode/ECCli.java|  48 +
 .../hdfs/tools/erasurecode/ECCommand.java   | 209 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |   2 +
 .../src/main/proto/erasurecoding.proto  |  15 ++
 18 files changed, 502 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a969f869/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index db73f6d..f873a01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -111,6 +111,10 @@ public class FsShell extends Configured implements Tool {
 return getTrash().getCurrentTrashDir();
   }
 
+  protected String getUsagePrefix() {
+return usagePrefix;
+  }
+
   // NOTE: Usage/Help are inner classes to allow access to outer methods
   // that access commandFactory
   
@@ -194,7 +198,7 @@ public class FsShell extends Configured implements Tool {
   }
 } else {
   // display help or usage for all commands 
-  out.println(usagePrefix);
+  out.println(getUsagePrefix());
   
   // display list of short usages
   ArrayList<Command> instances = new ArrayList<Command>();
@@ -218,7 +222,7 @@ public class FsShell extends Configured implements Tool {
   }
 
   private void printInstanceUsage(PrintStream out, Command instance) {
-out.println(usagePrefix + " " + instance.getUsage());
+out.println(getUsagePrefix() + " " + instance.getUsage());
   }
 
   private void printInstanceHelp(PrintStream out, Command instance) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a969f869/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9fdac98..b9fc6fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -79,4 +79,6 @@
 operation fails. (Rakesh R via Zhe Zhang)
 
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
-separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file
+separate erasurecoding proto file (Rakesh R via vinayakumarb)
+
+HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a969f869/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index f464261..84c79b8 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -134,6 +134,11 @@ case ${COMMAND} in
 hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
 HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
   ;;
+  erasurecode)
+

[25/50] hadoop git commit: HDFS-7937. Erasure Coding: INodeFile quota computation unit tests. Contributed by Kai Sasaki.

2015-05-04 Thread zhz
HDFS-7937. Erasure Coding: INodeFile quota computation unit tests. Contributed 
by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71b69ec9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71b69ec9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71b69ec9

Branch: refs/heads/HDFS-7285
Commit: 71b69ec9d2584db85b709d7907cd1fea533e0b8e
Parents: 567105e
Author: Jing Zhao ji...@apache.org
Authored: Fri Apr 17 18:07:07 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:26 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  23 +-
 .../server/namenode/TestStripedINodeFile.java   | 229 +++
 2 files changed, 250 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b69ec9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 20b0c5c..9f2f5ba 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -203,8 +203,27 @@ public class BlockInfoStriped extends BlockInfo {
 // In case striped blocks, total usage by this striped blocks should
 // be the total of data blocks and parity blocks because
 // `getNumBytes` is the total of actual data block size.
-return ((getNumBytes() - 1) / (dataBlockNum * BLOCK_STRIPED_CELL_SIZE) + 1)
-* BLOCK_STRIPED_CELL_SIZE * parityBlockNum + getNumBytes();
+
+// 0. Calculate the total bytes per stripes <Num Bytes per Stripes>
+long numBytesPerStripe = dataBlockNum * BLOCK_STRIPED_CELL_SIZE;
+if (getNumBytes() % numBytesPerStripe == 0) {
+  return getNumBytes() / dataBlockNum * getTotalBlockNum();
+}
+// 1. Calculate the number of stripes in this block group. <Num Stripes>
+long numStripes = (getNumBytes() - 1) / numBytesPerStripe + 1;
+// 2. Calculate the parity cell length in the last stripe. Note that the
+//    size of parity cells should equal the size of the first cell, if it
+//    is not full. <Last Stripe Parity Cell Length>
+long lastStripeParityCellLen = Math.min(getNumBytes() % numBytesPerStripe,
+BLOCK_STRIPED_CELL_SIZE);
+// 3. Total consumed space is the total of
+// - The total of the full cells of data blocks and parity blocks.
+// - The remaining of data block which does not make a stripe.
+// - The last parity block cells. These size should be same
+//   to the first cell in this stripe.
+return getTotalBlockNum() * (BLOCK_STRIPED_CELL_SIZE * (numStripes - 1))
++ getNumBytes() % numBytesPerStripe
++ lastStripeParityCellLen * parityBlockNum;
   }
 
   @Override
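
To make the arithmetic concrete, a standalone restatement of the formula with illustrative numbers (a hypothetical 6+3 layout with 256 KB cells; the figures are not taken from the patch):

  public class StripedSpaceConsumedExample {
    // Mirrors the spaceConsumed() logic above; all inputs are illustrative.
    static long spaceConsumed(long numBytes, int dataBlkNum, int parityBlkNum,
        int cellSize) {
      long bytesPerStripe = (long) dataBlkNum * cellSize;
      int totalBlkNum = dataBlkNum + parityBlkNum;
      if (numBytes % bytesPerStripe == 0) {
        return numBytes / dataBlkNum * totalBlkNum;
      }
      long numStripes = (numBytes - 1) / bytesPerStripe + 1;
      long lastParityCellLen = Math.min(numBytes % bytesPerStripe, cellSize);
      return totalBlkNum * (cellSize * (numStripes - 1))
          + numBytes % bytesPerStripe
          + lastParityCellLen * parityBlkNum;
    }

    public static void main(String[] args) {
      int k = 256 * 1024;
      // 2000 KB over 6+3: one full stripe (9 * 256 KB), 464 KB of leftover
      // data, and 3 parity cells of 256 KB each => 3536 KB consumed.
      System.out.println(spaceConsumed(2000L * k, 6, 3, k) / k); // 3536
    }
  }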

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71b69ec9/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
new file mode 100644
index 000..d251c30
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStripedINodeFile.java
@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode;
+
+import static org.junit.Assert.assertEquals;
+import static 

[44/50] hadoop git commit: HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open. Contributed by Kai Sasaki.

2015-05-04 Thread zhz
HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open. 
Contributed by Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f53dc38
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f53dc38
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f53dc38

Branch: refs/heads/HDFS-7285
Commit: 4f53dc38e42e754489e702d43cbab4cb5c609da7
Parents: 91ff556
Author: Jing Zhao ji...@apache.org
Authored: Tue Apr 28 13:42:24 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:31 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 -
 .../main/java/org/apache/hadoop/hdfs/DFSClient.java |  7 ++-
 .../apache/hadoop/hdfs/DFSStripedInputStream.java   |  5 +++--
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 16 +++-
 .../org/apache/hadoop/hdfs/TestReadStripedFile.java | 11 ---
 5 files changed, 28 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f53dc38/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 6c5d7ce..9b4bf24 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -139,4 +139,7 @@
 commands from standbynode if any (vinayakumarb)
 
 HDFS-8189. ClientProtocol#createErasureCodingZone API was wrongly annotated
-as Idempotent (vinayakumarb)
\ No newline at end of file
+as Idempotent (vinayakumarb)
+
+HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open.
+(Kai Sasaki via jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f53dc38/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 2b6d454..cfa14b8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1181,7 +1181,12 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 //Get block info from namenode
 TraceScope scope = getPathTraceScope("newDFSInputStream", src);
 try {
-  return new DFSInputStream(this, src, verifyChecksum);
+  ECInfo info = getErasureCodingInfo(src);
+  if (info != null) {
+return new DFSStripedInputStream(this, src, verifyChecksum, info);
+  } else {
+return new DFSInputStream(this, src, verifyChecksum);
+  }
 } finally {
   scope.close();
 }
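
The upshot is that striping stays invisible to callers: the ordinary open path returns whichever stream type the NameNode's ECInfo dictates. A hedged sketch, assuming a running cluster and a hypothetical path:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FSDataInputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class OpenStripedSketch {
    public static void main(String[] args) throws Exception {
      FileSystem fs = FileSystem.get(new Configuration());
      // Inside an EC zone the wrapped stream is a DFSStripedInputStream;
      // elsewhere it is a plain DFSInputStream -- same API either way.
      try (FSDataInputStream in = fs.open(new Path("/eczone/file"))) {
        byte[] buf = new byte[4096];
        System.out.println("read " + in.read(buf) + " bytes");
      }
    }
  }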

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f53dc38/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index fe9e101..f6f7ed2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -134,11 +134,12 @@ public class DFSStripedInputStream extends DFSInputStream 
{
   private final short parityBlkNum;
   private final ECInfo ecInfo;
 
-  DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum)
+  DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum, ECInfo info)
   throws IOException {
 super(dfsClient, src, verifyChecksum);
 // ECInfo is restored from NN just before reading striped file.
-ecInfo = dfsClient.getErasureCodingInfo(src);
+assert info != null;
+ecInfo = info;
 cellSize = ecInfo.getSchema().getChunkSize();
 dataBlkNum = (short)ecInfo.getSchema().getNumDataUnits();
 parityBlkNum = (short)ecInfo.getSchema().getNumParityUnits();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f53dc38/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSStripedInputStream.java
 

[29/50] hadoop git commit: HDFS-8145. Fix the editlog corruption exposed by failed TestAddStripedBlocks. Contributed by Jing Zhao.

2015-05-04 Thread zhz
HDFS-8145. Fix the editlog corruption exposed by failed TestAddStripedBlocks. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18a2e1e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18a2e1e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18a2e1e2

Branch: refs/heads/HDFS-7285
Commit: 18a2e1e2f314be8aa00516b7779855becc565f02
Parents: 71b69ec
Author: Jing Zhao ji...@apache.org
Authored: Fri Apr 17 18:13:47 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:27 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  7 --
 .../namenode/ErasureCodingZoneManager.java  | 12 +-
 .../hdfs/server/namenode/FSDirectory.java   |  6 ++---
 .../hdfs/server/namenode/FSEditLogLoader.java   | 13 ++-
 .../hdfs/server/namenode/FSImageFormat.java |  4 +---
 .../server/namenode/FSImageSerialization.java   | 13 +--
 .../blockmanagement/TestBlockInfoStriped.java   | 23 ++--
 .../hdfs/server/namenode/TestFSImage.java   |  2 +-
 8 files changed, 31 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18a2e1e2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 9f2f5ba..23e3153 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -244,13 +244,6 @@ public class BlockInfoStriped extends BlockInfo {
 return num;
   }
 
-  @Override
-  public void write(DataOutput out) throws IOException {
-out.writeShort(dataBlockNum);
-out.writeShort(parityBlockNum);
-super.write(out);
-  }
-
   /**
* Convert a complete block to an under construction block.
* @return BlockInfoUnderConstruction -  an under construction block.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18a2e1e2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
index 0a84083..3f94227 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
@@ -54,10 +54,6 @@ public class ErasureCodingZoneManager {
 this.dir = dir;
   }
 
-  boolean getECPolicy(INodesInPath iip) throws IOException {
-return getECSchema(iip) != null;
-  }
-
   ECSchema getECSchema(INodesInPath iip) throws IOException {
 ECZoneInfo ecZoneInfo = getECZoneInfo(iip);
 return ecZoneInfo == null ? null : ecZoneInfo.getSchema();
@@ -109,7 +105,7 @@ public class ErasureCodingZoneManager {
   throw new IOException("Attempt to create an erasure coding zone " +
   "for a file.");
 }
-if (getECPolicy(srcIIP)) {
+if (getECSchema(srcIIP) != null) {
   throw new IOException("Directory " + src + " is already in an " +
   "erasure coding zone.");
 }
@@ -132,8 +128,10 @@ public class ErasureCodingZoneManager {
   void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
   throws IOException {
 assert dir.hasReadLock();
-if (getECPolicy(srcIIP)
-!= getECPolicy(dstIIP)) {
+final ECSchema srcSchema = getECSchema(srcIIP);
+final ECSchema dstSchema = getECSchema(dstIIP);
+if ((srcSchema != null && !srcSchema.equals(dstSchema)) ||
+(dstSchema != null && !dstSchema.equals(srcSchema))) {
   throw new IOException(
   src + " can't be moved because the source and destination have " +
   "different erasure coding policies.");
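
The essence of the fix is a null-safe, symmetric comparison: the move is legal only when both endpoints resolve to the same schema, or both to none. An equivalent helper, written out for illustration only:

  import org.apache.hadoop.io.erasurecode.ECSchema;

  public class EcMoveCheckSketch {
    // Equivalent to the paired null checks in checkMoveValidity above.
    static boolean sameEcSchema(ECSchema src, ECSchema dst) {
      return src == null ? dst == null : src.equals(dst);
    }
  }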

http://git-wip-us.apache.org/repos/asf/hadoop/blob/18a2e1e2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 

[36/50] hadoop git commit: HDFS-8156. Add/implement necessary APIs even we just have the system default schema. Contributed by Kai Zheng.

2015-05-04 Thread zhz
HDFS-8156. Add/implement necessary APIs even we just have the system default 
schema. Contributed by Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c3201545
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c3201545
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c3201545

Branch: refs/heads/HDFS-7285
Commit: c3201545fb05542e789e4101cf195a2a0a6028b1
Parents: aefddb0
Author: Zhe Zhang z...@apache.org
Authored: Wed Apr 22 14:48:54 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:29 2015 -0700

--
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 173 +++
 .../hadoop/io/erasurecode/TestECSchema.java |   2 +-
 .../hadoop/io/erasurecode/TestSchemaLoader.java |   6 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   2 +-
 .../hdfs/server/namenode/ECSchemaManager.java   |  79 -
 .../namenode/ErasureCodingZoneManager.java  |  16 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  29 +++-
 .../org/apache/hadoop/hdfs/TestECSchemas.java   |   5 +-
 .../hadoop/hdfs/TestErasureCodingZones.java |  45 +++--
 10 files changed, 249 insertions(+), 111 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c3201545/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 32077f6..f058ea7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.io.erasurecode;
 
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.Map;
 
 /**
@@ -30,55 +31,80 @@ public final class ECSchema {
   public static final String CHUNK_SIZE_KEY = "chunkSize";
   public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
-  private String schemaName;
-  private String codecName;
-  private Map<String, String> options;
-  private int numDataUnits;
-  private int numParityUnits;
-  private int chunkSize;
+  /**
+   * A friendly and understandable name that can mean what's it, also serves as
+   * the identifier that distinguish it from other schemas.
+   */
+  private final String schemaName;
+
+  /**
+   * The erasure codec name associated.
+   */
+  private final String codecName;
+
+  /**
+   * Number of source data units coded
+   */
+  private final int numDataUnits;
+
+  /**
+   * Number of parity units generated in a coding
+   */
+  private final int numParityUnits;
+
+  /**
+   * Unit data size for each chunk in a coding
+   */
+  private final int chunkSize;
+
+  /*
+   * An erasure code can have its own specific advanced parameters, subject to
+   * itself to interpret these key-value settings.
+   */
+  private final Map<String, String> extraOptions;
 
   /**
-   * Constructor with schema name and provided options. Note the options may
+   * Constructor with schema name and provided all options. Note the options 
may
* contain additional information for the erasure codec to interpret further.
* @param schemaName schema name
-   * @param options schema options
+   * @param allOptions all schema options
*/
-  public ECSchema(String schemaName, Map<String, String> options) {
+  public ECSchema(String schemaName, Map<String, String> allOptions) {
  assert (schemaName != null && ! schemaName.isEmpty());
 
 this.schemaName = schemaName;
 
-if (options == null || options.isEmpty()) {
+if (allOptions == null || allOptions.isEmpty()) {
   throw new IllegalArgumentException("No schema options are provided");
 }
 
-String codecName = options.get(CODEC_NAME_KEY);
+this.codecName = allOptions.get(CODEC_NAME_KEY);
 if (codecName == null || codecName.isEmpty()) {
   throw new IllegalArgumentException("No codec option is provided");
 }
 
-int dataUnits = 0, parityUnits = 0;
-try {
-  if (options.containsKey(NUM_DATA_UNITS_KEY)) {
-dataUnits = Integer.parseInt(options.get(NUM_DATA_UNITS_KEY));
-  }
-} catch (NumberFormatException e) {
-  throw new IllegalArgumentException("Option value " +
-  options.get(NUM_DATA_UNITS_KEY) + " for " + NUM_DATA_UNITS_KEY +
-  " is found. It should be an integer");
+int tmpNumDataUnits = extractIntOption(NUM_DATA_UNITS_KEY, allOptions);
+int tmpNumParityUnits = extractIntOption(NUM_PARITY_UNITS_KEY, allOptions);
+  

[12/50] hadoop git commit: HDFS-8077. Erasure coding: fix bugs in EC zone and symlinks. Contributed by Jing Zhao and Zhe Zhang.

2015-05-04 Thread zhz
HDFS-8077. Erasure coding: fix bugs in EC zone and symlinks. Contributed by 
Jing Zhao and Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/41128e91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/41128e91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/41128e91

Branch: refs/heads/HDFS-7285
Commit: 41128e918d82932c6a7f88674b5f6b8e324dbc4e
Parents: 76dcb47
Author: Jing Zhao ji...@apache.org
Authored: Thu Apr 9 17:53:22 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:22 2015 -0700

--
 .../BlockInfoStripedUnderConstruction.java |  2 +-
 .../hdfs/server/blockmanagement/BlockManager.java  | 12 ++--
 .../server/namenode/ErasureCodingZoneManager.java  |  7 +++
 .../hadoop/hdfs/server/namenode/FSDirectory.java   |  4 ++--
 .../hdfs/server/namenode/FSEditLogLoader.java  | 11 ++-
 .../hdfs/server/namenode/FSImageSerialization.java |  4 ++--
 .../hadoop/hdfs/server/namenode/INodeFile.java | 17 -
 .../hdfs/server/namenode/TestFSEditLogLoader.java  |  4 ++--
 .../hadoop/hdfs/server/namenode/TestFSImage.java   |  2 +-
 .../server/namenode/TestRecoverStripedBlocks.java  |  2 +-
 10 files changed, 32 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/41128e91/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
index cfaf3a0..0373314 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStripedUnderConstruction.java
@@ -96,7 +96,7 @@ public class BlockInfoStripedUnderConstruction extends 
BlockInfoStriped
 for(int i = 0; i  numLocations; i++) {
   // when creating a new block we simply sequentially assign block index to
   // each storage
-  Block blk = new Block(this.getBlockId() + i, this.getGenerationStamp(), 
0);
+  Block blk = new Block(this.getBlockId() + i, 0, 
this.getGenerationStamp());
   replicas[i] = new ReplicaUnderConstruction(blk, targets[i],
   ReplicaState.RBW);
 }
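
The one-line fix swaps the last two constructor arguments. Assuming the Block(long blockId, long numBytes, long generationStamp) signature, the old call recorded the generation stamp as the replica's length; restated for illustration:

  import org.apache.hadoop.hdfs.protocol.Block;

  public class NewReplicaBlockSketch {
    // Assumed signature: Block(long blockId, long numBytes, long genStamp).
    static Block newReplicaBlock(long blockId, long genStamp) {
      // Buggy form, new Block(blockId, genStamp, 0), stored the stamp as length.
      return new Block(blockId, 0, genStamp); // zero-length, correct stamp
    }
  }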

http://git-wip-us.apache.org/repos/asf/hadoop/blob/41128e91/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index baf28f1..7b6339b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2461,12 +2461,12 @@ public class BlockManager {
   case COMMITTED:
 if (storedBlock.getGenerationStamp() != reported.getGenerationStamp()) 
{
   final long reportedGS = reported.getGenerationStamp();
-  return new BlockToMarkCorrupt(reported, storedBlock, reportedGS,
+  return new BlockToMarkCorrupt(new Block(reported), storedBlock, 
reportedGS,
  "block is " + ucState + " and reported genstamp " + reportedGS
  + " does not match genstamp in block map "
   + storedBlock.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
 } else if (storedBlock.getNumBytes() != reported.getNumBytes()) {
-  return new BlockToMarkCorrupt(reported, storedBlock,
+  return new BlockToMarkCorrupt(new Block(reported), storedBlock,
  "block is " + ucState + " and reported length " +
  reported.getNumBytes() + " does not match " +
  "length in block map " + storedBlock.getNumBytes(),
@@ -2477,7 +2477,7 @@ public class BlockManager {
   case UNDER_CONSTRUCTION:
 if (storedBlock.getGenerationStamp()  reported.getGenerationStamp()) {
   final long reportedGS = reported.getGenerationStamp();
-  return new BlockToMarkCorrupt(reported, storedBlock, reportedGS,
+  return new BlockToMarkCorrupt(new Block(reported), storedBlock, 
reportedGS,
  "block is " + ucState + " and reported 

[35/50] hadoop git commit: HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to create BlockReader. Contributed by Tsz Wo Nicholas Sze.

2015-05-04 Thread zhz
HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to create 
BlockReader. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a0971b9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a0971b9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a0971b9b

Branch: refs/heads/HDFS-7285
Commit: a0971b9b5318685921642d2da1a27169f5589cb3
Parents: e875f1d
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 21 20:56:39 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:28 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +
 .../apache/hadoop/hdfs/BlockReaderTestUtil.java |  7 +--
 .../hadoop/hdfs/TestBlockReaderFactory.java | 16 +++---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 58 ++--
 4 files changed, 20 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0971b9b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 8f28285..d8f2e9d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -107,3 +107,6 @@
 
 HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.
 (szetszwo)
+
+HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to 
+create BlockReader. (szetszwo via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0971b9b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
index 88b7f37..829cf03 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/BlockReaderTestUtil.java
@@ -165,20 +165,19 @@ public class BlockReaderTestUtil {
*/
   public BlockReader getBlockReader(LocatedBlock testBlock, int offset, int 
lenToRead)
   throws IOException {
-return getBlockReader(cluster, testBlock, offset, lenToRead);
+return getBlockReader(cluster.getFileSystem(), testBlock, offset, 
lenToRead);
   }
 
   /**
* Get a BlockReader for the given block.
*/
-  public static BlockReader getBlockReader(MiniDFSCluster cluster,
-  LocatedBlock testBlock, int offset, int lenToRead) throws IOException {
+  public static BlockReader getBlockReader(final DistributedFileSystem fs,
+  LocatedBlock testBlock, int offset, long lenToRead) throws IOException {
 InetSocketAddress targetAddr = null;
 ExtendedBlock block = testBlock.getBlock();
 DatanodeInfo[] nodes = testBlock.getLocations();
 targetAddr = NetUtils.createSocketAddr(nodes[0].getXferAddr());
 
-final DistributedFileSystem fs = cluster.getFileSystem();
 return new BlockReaderFactory(fs.getClient().getConf()).
   setInetSocketAddress(targetAddr).
   setBlock(block).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a0971b9b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
index d8aceff..1a767c3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockReaderFactory.java
@@ -250,8 +250,8 @@ public class TestBlockReaderFactory {
   LocatedBlock lblock = locatedBlocks.get(0); // first block
   BlockReader blockReader = null;
   try {
-blockReader = BlockReaderTestUtil.
-getBlockReader(cluster, lblock, 0, TEST_FILE_LEN);
+blockReader = BlockReaderTestUtil.getBlockReader(
+cluster.getFileSystem(), lblock, 0, TEST_FILE_LEN);
 Assert.fail("expected getBlockReader to fail the first time.");
   } catch (Throwable t) { 
 Assert.assertTrue("expected to see 'TCP reads were disabled " +
@@ -265,8 +265,8 @@ public class TestBlockReaderFactory {
 
   // Second time should succeed.
   

[42/50] hadoop git commit: HDFS-8033. Erasure coding: stateful (non-positional) read from files in striped layout. Contributed by Zhe Zhang.

2015-05-04 Thread zhz
HDFS-8033. Erasure coding: stateful (non-positional) read from files in striped 
layout. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e51018a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e51018a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e51018a1

Branch: refs/heads/HDFS-7285
Commit: e51018a1b0c645071dbf1eb6ba0354b5593e8290
Parents: 395e29b
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 24 22:36:15 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:30 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  55 ++--
 .../hadoop/hdfs/DFSStripedInputStream.java  | 311 ++-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  43 +++
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 110 ++-
 5 files changed, 465 insertions(+), 57 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51018a1/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index cf41a9b..e8db485 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -131,3 +131,6 @@
 
 HDFS-8228. Erasure Coding: SequentialBlockGroupIdGenerator#nextValue may 
cause 
 block id conflicts (Jing Zhao via Zhe Zhang)
+
+HDFS-8033. Erasure coding: stateful (non-positional) read from files in 
+striped layout (Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e51018a1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 16250dd..6eb25d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -95,34 +95,34 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   public static boolean tcpReadsDisabledForTesting = false;
   private long hedgedReadOpsLoopNumForTesting = 0;
   protected final DFSClient dfsClient;
-  private AtomicBoolean closed = new AtomicBoolean(false);
-  private final String src;
-  private final boolean verifyChecksum;
+  protected AtomicBoolean closed = new AtomicBoolean(false);
+  protected final String src;
+  protected final boolean verifyChecksum;
 
   // state by stateful read only:
   // (protected by lock on this)
   /
   private DatanodeInfo currentNode = null;
-  private LocatedBlock currentLocatedBlock = null;
-  private long pos = 0;
-  private long blockEnd = -1;
+  protected LocatedBlock currentLocatedBlock = null;
+  protected long pos = 0;
+  protected long blockEnd = -1;
   private BlockReader blockReader = null;
   
 
   // state shared by stateful and positional read:
   // (protected by lock on infoLock)
   
-  private LocatedBlocks locatedBlocks = null;
+  protected LocatedBlocks locatedBlocks = null;
   private long lastBlockBeingWrittenLength = 0;
   private FileEncryptionInfo fileEncryptionInfo = null;
-  private CachingStrategy cachingStrategy;
+  protected CachingStrategy cachingStrategy;
   
 
-  private final ReadStatistics readStatistics = new ReadStatistics();
+  protected final ReadStatistics readStatistics = new ReadStatistics();
   // lock for state shared between read and pread
   // Note: Never acquire a lock on this with this lock held to avoid 
deadlocks
   //   (it's OK to acquire this lock when the lock on this is held)
-  private final Object infoLock = new Object();
+  protected final Object infoLock = new Object();
 
   /**
* Track the ByteBuffers that we have handed out to readers.
@@ -239,7 +239,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
* back to the namenode to get a new list of block locations, and is
* capped at maxBlockAcquireFailures
*/
-  private int failures = 0;
+  protected int failures = 0;
 
   /* XXX Use of CocurrentHashMap is temp fix. Need to fix 
* parallel accesses to DFSInputStream (through ptreads) properly */
@@ -476,7 +476,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   }
 
   /** Fetch a block from namenode and cache it */
-  private void fetchBlockAt(long offset) throws IOException {
+  protected void fetchBlockAt(long 

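The point of the visibility changes above is to let DFSStripedInputStream subclass DFSInputStream and reuse its positioned-read bookkeeping instead of duplicating it. A minimal sketch of that pattern, with hypothetical stand-in names (not the real HDFS classes):

    // Sketch only: BaseStream/StripedStream are illustrative stand-ins
    // for DFSInputStream/DFSStripedInputStream.
    class BaseStream {
      protected long pos = 0;        // read cursor, private before this patch
      protected long blockEnd = -1;  // end offset of the current block
      protected void fetchBlockAt(long offset) { /* locate the block */ }
    }

    class StripedStream extends BaseStream {
      @Override
      protected void fetchBlockAt(long offset) {
        // The subclass can now consult and update the inherited cursor
        // state directly instead of keeping a parallel copy.
        super.fetchBlockAt(offset);
        pos = offset;
      }
    }
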
[47/50] hadoop git commit: HDFS-8282. Erasure coding: move striped reading logic to StripedBlockUtil. Contributed by Zhe Zhang.

2015-05-04 Thread zhz
HDFS-8282. Erasure coding: move striped reading logic to StripedBlockUtil. 
Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ef70904e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ef70904e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ef70904e

Branch: refs/heads/HDFS-7285
Commit: ef70904e6a104a7a9d2d68abf275dca3d13dca92
Parents: aae5452
Author: Zhe Zhang z...@apache.org
Authored: Wed Apr 29 23:49:52 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:31 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/DFSStripedInputStream.java  | 111 +---
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 174 +++
 .../hadoop/hdfs/TestPlanReadPortions.java   |  11 +-
 4 files changed, 186 insertions(+), 113 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef70904e/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 6a9bdee..ca60487 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -146,3 +146,6 @@
 
 HDFS-8272. Erasure Coding: simplify the retry logic in 
DFSStripedInputStream 
 (stateful read). (Jing Zhao via Zhe Zhang)
+
+HDFS-8282. Erasure coding: move striped reading logic to StripedBlockUtil.
+(Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ef70904e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 3da7306..0dc98fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.hdfs;
 
-import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.*;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import static org.apache.hadoop.hdfs.util.StripedBlockUtil.ReadPortion;
+import static org.apache.hadoop.hdfs.util.StripedBlockUtil.planReadPortions;
+
 import org.apache.hadoop.net.NetUtils;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -31,8 +33,6 @@ import org.apache.htrace.TraceScope;
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.Set;
 import java.util.Map;
 import java.util.HashMap;
@@ -69,59 +69,6 @@ import java.util.concurrent.Future;
  *   3. pread with decode support: TODO: will be supported after HDFS-7678
  */
 public class DFSStripedInputStream extends DFSInputStream {
-  /**
-   * This method plans the read portion from each block in the stripe
-   * @param dataBlkNum The number of data blocks in the striping group
-   * @param cellSize The size of each striping cell
-   * @param startInBlk Starting offset in the striped block
-   * @param len Length of the read request
-   * @param bufOffset  Initial offset in the result buffer
-   * @return array of {@link ReadPortion}, each representing the portion of I/O
-   * for an individual block in the group
-   */
-  @VisibleForTesting
-  static ReadPortion[] planReadPortions(final int dataBlkNum,
-  final int cellSize, final long startInBlk, final int len, int bufOffset) 
{
-ReadPortion[] results = new ReadPortion[dataBlkNum];
-for (int i = 0; i < dataBlkNum; i++) {
-  results[i] = new ReadPortion();
-}
-
-// cellIdxInBlk is the index of the cell in the block
-// E.g., cell_3 is the 2nd cell in blk_0
-int cellIdxInBlk = (int) (startInBlk / (cellSize * dataBlkNum));
-
-// blkIdxInGroup is the index of the block in the striped block group
-// E.g., blk_2 is the 3rd block in the group
-final int blkIdxInGroup = (int) (startInBlk / cellSize % dataBlkNum);
-results[blkIdxInGroup].startOffsetInBlock = cellSize * cellIdxInBlk +
-startInBlk % cellSize;
-boolean 

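The arithmetic in the (truncated) body above maps a logical offset in a block group to a block index and an offset inside that block. A standalone sketch of just that mapping, under the layout the javadoc describes (cells of cellSize bytes striped round-robin across dataBlkNum blocks); it is illustrative only, since the full planReadPortions also fills per-block lengths and result-buffer offsets:

    static void locate(long startInBlk, int cellSize, int dataBlkNum) {
      int stripeSize = cellSize * dataBlkNum;
      int cellIdxInBlk = (int) (startInBlk / stripeSize);             // row
      int blkIdxInGroup = (int) (startInBlk / cellSize % dataBlkNum); // column
      long offsetInBlock = (long) cellSize * cellIdxInBlk
          + startInBlk % cellSize;
      System.out.println("block " + blkIdxInGroup + ", offset " + offsetInBlock);
    }

For example, with cellSize = 4 and dataBlkNum = 3, group offset 5 falls in block 1 at block offset 1.
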
[37/50] hadoop git commit: HDFS-8136. Client gets and uses EC schema when reading and writing a striped file. Contributed by Kai Sasaki

2015-05-04 Thread zhz
HDFS-8136. Client gets and uses EC schema when reading and writing a striped 
file. Contributed by Kai Sasaki


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8ee9f348
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8ee9f348
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8ee9f348

Branch: refs/heads/HDFS-7285
Commit: 8ee9f3481a5a02fd0d1d43780d5ff46105039cf5
Parents: c320154
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 24 00:19:12 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:29 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/DFSStripedInputStream.java  |  17 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java |  24 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 175 +++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |   4 +-
 .../apache/hadoop/hdfs/TestReadStripedFile.java |   1 -
 7 files changed, 210 insertions(+), 16 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee9f348/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index b2faac0..8977c46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -119,3 +119,6 @@
 
 HDFS-8156. Add/implement necessary APIs even we just have the system 
default 
 schema. (Kai Zheng via Zhe Zhang)
+
+HDFS-8136. Client gets and uses EC schema when reads and writes a stripping
+file. (Kai Sasaki via Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee9f348/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index d597407..d0e2b68 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -21,9 +21,9 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
+import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
@@ -125,13 +125,19 @@ public class DFSStripedInputStream extends DFSInputStream 
{
 return results;
   }
 
-  private int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
-  private final short dataBlkNum = HdfsConstants.NUM_DATA_BLOCKS;
-  private final short parityBlkNum = HdfsConstants.NUM_PARITY_BLOCKS;
+  private final int cellSize;
+  private final short dataBlkNum;
+  private final short parityBlkNum;
+  private final ECInfo ecInfo;
 
   DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum)
   throws IOException {
 super(dfsClient, src, verifyChecksum);
+// ECInfo is restored from NN just before reading striped file.
+ecInfo = dfsClient.getErasureCodingInfo(src);
+cellSize = ecInfo.getSchema().getChunkSize();
+dataBlkNum = (short)ecInfo.getSchema().getNumDataUnits();
+parityBlkNum = (short)ecInfo.getSchema().getNumParityUnits();
  DFSClient.LOG.debug("Creating an striped input stream for file " + src);
   }
 
@@ -279,9 +285,6 @@ public class DFSStripedInputStream extends DFSInputStream {
  throw new InterruptedException("let's retry");
   }
 
-  public void setCellSize(int cellSize) {
-this.cellSize = cellSize;
-  }
 
   /**
* This class represents the portion of I/O associated with each block in the

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8ee9f348/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 

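The constructor change above replaces hard-coded striping constants with values read from the file's schema. A hedged sketch of the resulting flow, using only the getters visible in the diff (the surrounding client plumbing is elided):

    // Assumes getErasureCodingInfo(src) returns the file's ECInfo,
    // as this patch introduces.
    ECInfo info = dfsClient.getErasureCodingInfo(src);
    int cellSize       = info.getSchema().getChunkSize();
    short dataBlkNum   = (short) info.getSchema().getNumDataUnits();
    short parityBlkNum = (short) info.getSchema().getNumParityUnits();
    // e.g. a 6+3 Reed-Solomon schema yields dataBlkNum=6, parityBlkNum=3
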
[10/50] hadoop git commit: HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from NameNode (Contributed by Vinayakumar B). Added missing file

2015-05-04 Thread zhz
HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from 
NameNode (Contributed by Vinayakumar B)
Added missing file


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/736202e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/736202e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/736202e9

Branch: refs/heads/HDFS-7285
Commit: 736202e96c572b5e920a370aa5c7170b9fdc3b56
Parents: 48f975a
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 14:23:03 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:07 2015 -0700

--
 .../org/apache/hadoop/hdfs/protocol/ECInfo.java | 41 
 1 file changed, 41 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/736202e9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
new file mode 100644
index 000..ca642c2
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ECInfo.java
@@ -0,0 +1,41 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+
+/**
+ * Class to provide information, such as ECSchema, for a file/block.
+ */
+public class ECInfo {
+  private final String src;
+  private final ECSchema schema;
+
+  public ECInfo(String src, ECSchema schema) {
+this.src = src;
+this.schema = schema;
+  }
+
+  public String getSrc() {
+return src;
+  }
+
+  public ECSchema getSchema() {
+return schema;
+  }
+}

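ECInfo is a plain immutable (src, schema) pair; a minimal usage sketch follows. The 'schema' variable is assumed to be an ECSchema obtained elsewhere (e.g. from the NameNode side), since its construction is not part of this patch:

    ECInfo info = new ECInfo("/ec/file", schema);
    String src = info.getSrc();       // "/ec/file"
    ECSchema same = info.getSchema(); // the same immutable reference
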


[43/50] hadoop git commit: HDFS-8230. Erasure Coding: Ignore DatanodeProtocol#DNA_ERASURE_CODING_RECOVERY commands from standby node, if any (Contributed by Vinayakumar B)

2015-05-04 Thread zhz
HDFS-8230. Erasure Coding: Ignore DatanodeProtocol#DNA_ERASURE_CODING_RECOVERY 
commands from standby node, if any (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f7064bbd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f7064bbd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f7064bbd

Branch: refs/heads/HDFS-7285
Commit: f7064bbd941eecc512c1016b7997fe2c451460eb
Parents: e51018a
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 28 14:14:33 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:30 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/datanode/BPOfferService.java| 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7064bbd/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index e8db485..c28473b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -134,3 +134,6 @@
 
 HDFS-8033. Erasure coding: stateful (non-positional) read from files in 
 striped layout (Zhe Zhang)
+
+HDFS-8230. Erasure Coding: Ignore 
DatanodeProtocol#DNA_ERASURE_CODING_RECOVERY 
+commands from standbynode if any (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f7064bbd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
index 69baac7..6606d0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
@@ -757,6 +757,7 @@ class BPOfferService {
 case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
 case DatanodeProtocol.DNA_CACHE:
 case DatanodeProtocol.DNA_UNCACHE:
+case DatanodeProtocol.DNA_ERASURE_CODING_RECOVERY:
   LOG.warn("Got a command from standby NN - ignoring command: " +
 cmd.getAction());
   break;
 default:

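The whole fix is the one new case label: commands that would mutate datanode state must be dropped when they arrive from a standby NameNode. A reduced sketch of the filter, with illustrative constant values (the real ones live in DatanodeProtocol):

    static final int DNA_CACHE = 9;                    // hypothetical values
    static final int DNA_UNCACHE = 10;
    static final int DNA_ERASURE_CODING_RECOVERY = 11;

    static void processCommandFromStandby(int action) {
      switch (action) {
        case DNA_CACHE:
        case DNA_UNCACHE:
        case DNA_ERASURE_CODING_RECOVERY:  // the one-line fix above
          System.out.println("Got a command from standby NN - ignoring it");
          break;
        default:
          break;  // other actions handled by the active-NN path
      }
    }
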


[31/50] hadoop git commit: HDFS-8181. createErasureCodingZone sets retryCache state as false always (Contributed by Uma Maheswara Rao G)

2015-05-04 Thread zhz
HDFS-8181. createErasureCodingZone sets retryCache state as false always 
(Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/569f8752
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/569f8752
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/569f8752

Branch: refs/heads/HDFS-7285
Commit: 569f87527f8d7f4facbb2543af2fa24ee05bb611
Parents: 7858b54
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Apr 20 15:04:49 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:27 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  | 14 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java   |  1 +
 2 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/569f8752/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 40517e7..c8dbf08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -83,10 +83,24 @@
 
 HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
 
+HDFS-8120. Erasure coding: created util class to analyze striped block 
groups.
+(Contributed by Zhe Zhang and Li Bo via Jing Zhao)
+
 HDFS-7994. Detect if resevered EC Block ID is already used during namenode
 startup. (Hui Zheng via szetszwo)
 
 HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).
 
+HDFS-8166. DFSStripedOutputStream should not create empty blocks. (Jing 
Zhao)
+
+HDFS-7937. Erasure Coding: INodeFile quota computation unit tests.
+(Kai Sasaki via Jing Zhao)
+
+HDFS-8145. Fix the editlog corruption exposed by failed 
TestAddStripedBlocks.
+(Jing Zhao)
+
 HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for
 making it ready for transfer to DN (Uma Maheswara Rao G via vinayakumarb)
+
+HDFS-8181. createErasureCodingZone sets retryCache state as false always
+(Uma Maheswara Rao G via vinayakumarb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/569f8752/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 023f863..06701a9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1856,6 +1856,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
 boolean success = false;
 try {
   namesystem.createErasureCodingZone(src, schema, cacheEntry != null);
+  success = true;
 } finally {
   RetryCache.setState(cacheEntry, success);
 }

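The bug was that success stayed false even when createErasureCodingZone returned normally, so a retried RPC would re-execute the operation instead of replaying the cached result. The try/finally shape the fix restores (sketch; doOperation stands in for the actual namesystem call):

    boolean success = false;
    try {
      doOperation();     // the actual namesystem call
      success = true;    // only reached when no exception was thrown
    } finally {
      RetryCache.setState(cacheEntry, success);  // record the real outcome
    }
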


[01/50] hadoop git commit: HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng

2015-05-04 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 26770f8a3 - c4fcea80f (forced update)


HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/debec6fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/debec6fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/debec6fc

Branch: refs/heads/HDFS-7285
Commit: debec6fcebda6b27f48c254ebb4344640c0fff5f
Parents: 328d7d2
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Apr 8 01:26:40 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:11:31 2015 -0700

--
 .../io/erasurecode/coder/RSErasureDecoder.java  |  8 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  4 +-
 .../io/erasurecode/coder/XORErasureDecoder.java | 78 
 .../io/erasurecode/coder/XORErasureEncoder.java | 45 ++
 .../io/erasurecode/coder/XorErasureDecoder.java | 78 
 .../io/erasurecode/coder/XorErasureEncoder.java | 45 --
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  | 69 ---
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  | 78 
 .../rawcoder/JRSRawErasureCoderFactory.java | 34 ---
 .../io/erasurecode/rawcoder/RSRawDecoder.java   | 69 +++
 .../io/erasurecode/rawcoder/RSRawEncoder.java   | 78 
 .../rawcoder/RSRawErasureCoderFactory.java  | 34 +++
 .../io/erasurecode/rawcoder/XORRawDecoder.java  | 81 +
 .../io/erasurecode/rawcoder/XORRawEncoder.java  | 61 +
 .../rawcoder/XORRawErasureCoderFactory.java | 34 +++
 .../io/erasurecode/rawcoder/XorRawDecoder.java  | 81 -
 .../io/erasurecode/rawcoder/XorRawEncoder.java  | 61 -
 .../rawcoder/XorRawErasureCoderFactory.java | 34 ---
 .../erasurecode/coder/TestRSErasureCoder.java   |  4 +-
 .../io/erasurecode/coder/TestXORCoder.java  | 50 +++
 .../io/erasurecode/coder/TestXorCoder.java  | 50 ---
 .../erasurecode/rawcoder/TestJRSRawCoder.java   | 93 
 .../io/erasurecode/rawcoder/TestRSRawCoder.java | 93 
 .../erasurecode/rawcoder/TestXORRawCoder.java   | 49 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   | 51 ---
 25 files changed, 680 insertions(+), 682 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/debec6fc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index ba32f04..e2c5051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -4,9 +4,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.rawcoder.JRSRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
-import org.apache.hadoop.io.erasurecode.rawcoder.XorRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
 
 /**
  * Reed-Solomon erasure decoder that decodes a block group.
@@ -56,7 +56,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
   rsRawDecoder = createRawDecoder(
   CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY);
   if (rsRawDecoder == null) {
-rsRawDecoder = new JRSRawDecoder();
+rsRawDecoder = new RSRawDecoder();
   }
   rsRawDecoder.initialize(getNumDataUnits(),
   getNumParityUnits(), getChunkSize());
@@ -66,7 +66,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
 
   private RawErasureDecoder checkCreateXorRawDecoder() {
 if (xorRawDecoder == null) {
-  xorRawDecoder = new XorRawDecoder();
+  xorRawDecoder = new XORRawDecoder();
   xorRawDecoder.initialize(getNumDataUnits(), 1, getChunkSize());
 }
 return xorRawDecoder;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/debec6fc/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
--
diff --git 

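Beyond the renames, the decoder hunks above show the lookup-with-fallback pattern used for raw coders: try the configured factory key first, then fall back to the built-in pure-Java implementation. Consolidated from the diff (a sketch; getNumDataUnits and friends belong to the surrounding coder class):

    RawErasureDecoder rsRawDecoder = createRawDecoder(
        CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY);
    if (rsRawDecoder == null) {
      rsRawDecoder = new RSRawDecoder();   // renamed from JRSRawDecoder
    }
    rsRawDecoder.initialize(getNumDataUnits(), getNumParityUnits(),
        getChunkSize());
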
[38/50] hadoop git commit: HDFS-8233. Fix DFSStripedOutputStream#getCurrentBlockGroupBytes when the last stripe is at the block group boundary. Contributed by Jing Zhao.

2015-05-04 Thread zhz
HDFS-8233. Fix DFSStripedOutputStream#getCurrentBlockGroupBytes when the last 
stripe is at the block group boundary. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cdd1a786
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cdd1a786
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cdd1a786

Branch: refs/heads/HDFS-7285
Commit: cdd1a78639b8156d5bafbdcbbd0e6031c2cd5bfe
Parents: 8ee9f34
Author: Jing Zhao ji...@apache.org
Authored: Thu Apr 23 15:43:04 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:29 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java | 51 +---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  6 +++
 3 files changed, 34 insertions(+), 28 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdd1a786/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 8977c46..48791b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -121,4 +121,7 @@
 schema. (Kai Zheng via Zhe Zhang)
 
 HDFS-8136. Client gets and uses EC schema when reads and writes a stripping
-file. (Kai Sasaki via Kai Zheng)
\ No newline at end of file
+file. (Kai Sasaki via Kai Zheng)
+
+HDFS-8233. Fix DFSStripedOutputStream#getCurrentBlockGroupBytes when the 
last
+stripe is at the block group boundary. (jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cdd1a786/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index eeb9d7e..245dfc1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.io.erasurecode.rawcoder.RSRawEncoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 import org.apache.hadoop.util.DataChecksum;
@@ -278,14 +277,6 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
 return numDataBlocks * cellSize;
   }
 
-  private long getCurrentBlockGroupBytes() {
-long sum = 0;
-for (int i = 0; i < numDataBlocks; i++) {
-  sum += streamers.get(i).getBytesCurBlock();
-}
-return sum;
-  }
-
   private void notSupported(String headMsg)
   throws IOException{
   throw new IOException(
@@ -347,37 +338,43 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
 }
   }
 
+  /**
+   * Simply add bytesCurBlock together. Note that this result is not accurately
+   * the size of the block group.
+   */
+  private long getCurrentSumBytes() {
+long sum = 0;
+for (int i = 0; i < numDataBlocks; i++) {
+  sum += streamers.get(i).getBytesCurBlock();
+}
+return sum;
+  }
+
   private void writeParityCellsForLastStripe() throws IOException {
-final long currentBlockGroupBytes = getCurrentBlockGroupBytes();
-long parityBlkSize = StripedBlockUtil.getInternalBlockLength(
-currentBlockGroupBytes, cellSize, numDataBlocks,
-numDataBlocks + 1);
-if (parityBlkSize == 0 || currentBlockGroupBytes % stripeDataSize() == 0) {
+final long currentBlockGroupBytes = getCurrentSumBytes();
+if (currentBlockGroupBytes % stripeDataSize() == 0) {
   return;
 }
-int parityCellSize = parityBlkSize % cellSize == 0 ? cellSize :
-(int) (parityBlkSize % cellSize);
+long firstCellSize = getLeadingStreamer().getBytesCurBlock() % cellSize;
+long parityCellSize = firstCellSize > 0 && firstCellSize < cellSize ?
+firstCellSize : cellSize;
 
 for (int i = 0; i < numAllBlocks; i++) {
-  long internalBlkLen = StripedBlockUtil.getInternalBlockLength(
-  currentBlockGroupBytes, cellSize, numDataBlocks, i);
   // Pad zero bytes to make all cells exactly the size of parityCellSize
   // If internal 

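The new rule reads the size of the leading cell of the last, partial stripe and uses it as the parity cell size. A sketch of just that rule:

    // Sketch of the parityCellSize computation introduced above.
    static long parityCellSize(long firstCellSize, int cellSize) {
      return firstCellSize > 0 && firstCellSize < cellSize
          ? firstCellSize   // partial last stripe: shrink the parity cells
          : cellSize;       // stripe ends on a cell boundary: full cells
    }

For example, with cellSize = 64K and 80K in the leading streamer's current block, firstCellSize = 80K % 64K = 16K, so the last stripe's parity cells are 16K and the remaining cells are zero-padded up to that size.
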
[34/50] hadoop git commit: HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.

2015-05-04 Thread zhz
HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e875f1dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e875f1dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e875f1dc

Branch: refs/heads/HDFS-7285
Commit: e875f1dcbc64a297dad2c654cec7470ff3b5084d
Parents: 08b56e4
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Apr 20 17:42:02 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:28 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  61 ---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 178 +++
 3 files changed, 100 insertions(+), 142 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e875f1dc/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index c8dbf08..8f28285 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -104,3 +104,6 @@
 
 HDFS-8181. createErasureCodingZone sets retryCache state as false always
 (Uma Maheswara Rao G via vinayakumarb)
+
+HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.
+(szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e875f1dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 2368021..d622d4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Utility class for analyzing striped block groups
  */
@@ -81,46 +83,43 @@ public class StripedBlockUtil {
   /**
* Get the size of an internal block at the given index of a block group
*
-   * @param numBytesInGroup Size of the block group only counting data blocks
+   * @param dataSize Size of the block group only counting data blocks
* @param cellSize The size of a striping cell
-   * @param dataBlkNum The number of data blocks
-   * @param idxInGroup The logical index in the striped block group
+   * @param numDataBlocks The number of data blocks
+   * @param i The logical index in the striped block group
* @return The size of the internal block at the specified index
*/
-  public static long getInternalBlockLength(long numBytesInGroup,
-  int cellSize, int dataBlkNum, int idxInGroup) {
+  public static long getInternalBlockLength(long dataSize,
+  int cellSize, int numDataBlocks, int i) {
+Preconditions.checkArgument(dataSize >= 0);
+Preconditions.checkArgument(cellSize > 0);
+Preconditions.checkArgument(numDataBlocks > 0);
+Preconditions.checkArgument(i >= 0);
 // Size of each stripe (only counting data blocks)
-final long numBytesPerStripe = cellSize * dataBlkNum;
-assert numBytesPerStripe > 0:
-"getInternalBlockLength should only be called on valid striped blocks";
+final int stripeSize = cellSize * numDataBlocks;
 // If block group ends at stripe boundary, each internal block has an equal
 // share of the group
-if (numBytesInGroup % numBytesPerStripe == 0) {
-  return numBytesInGroup / dataBlkNum;
+final int lastStripeDataLen = (int)(dataSize % stripeSize);
+if (lastStripeDataLen == 0) {
+  return dataSize / numDataBlocks;
 }
 
-int numStripes = (int) ((numBytesInGroup - 1) / numBytesPerStripe + 1);
-assert numStripes >= 1 : "There should be at least 1 stripe";
-
-// All stripes but the last one are full stripes. The block should at least
-// contain (numStripes - 1) full cells.
-long blkSize = (numStripes - 1) * cellSize;
-
-long lastStripeLen = numBytesInGroup % numBytesPerStripe;
-// Size of parity cells should equal the size of the first cell, if it
-// is not full.
-long lastParityCellLen = Math.min(cellSize, lastStripeLen);
-
-if (idxInGroup >= dataBlkNum) {
-  // for 

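A worked example of the boundary case kept above (the partial-stripe branch is truncated in the diff):

    // Sketch of the stripe-boundary fast path; arguments as in the diff.
    static long internalBlockLenAtBoundary(long dataSize, int cellSize,
        int numDataBlocks) {
      final int stripeSize = cellSize * numDataBlocks;
      if (dataSize % stripeSize == 0) {
        return dataSize / numDataBlocks;  // equal share per internal block
      }
      throw new IllegalArgumentException("group has a partial last stripe");
    }
    // e.g. dataSize = 24, cellSize = 4, numDataBlocks = 3 -> 8 per block
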
[05/50] hadoop git commit: HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by Zhe Zhang. Updated CHANGES-HDFS-EC-7285.txt

2015-05-04 Thread zhz
HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by 
Zhe Zhang.
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8f013767
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8f013767
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8f013767

Branch: refs/heads/HDFS-7285
Commit: 8f01376759a19f34f93ccd8dda3484e675bb16fe
Parents: 03ba1ab
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 15:35:18 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:11:32 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 5 +
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f013767/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 68d1d32..7716728 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -33,5 +33,7 @@
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
 ( Xinwei Qin via Kai Zheng )
 
+HADOOP-11740. Combine erasure encoder and decoder interfaces (Zhe Zhang)
+
 HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
 ( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8f013767/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 3874cb4..9927ccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -49,7 +49,4 @@
 (Hui Zheng via Zhe Zhang)
 
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
-manage EC zones (Zhe Zhang)
-
-HADOOP-11740. Combine erasure encoder and decoder interfaces (Zhe Zhang)
-
+manage EC zones (Zhe Zhang)
\ No newline at end of file



[41/50] hadoop git commit: HDFS-8228. Erasure Coding: SequentialBlockGroupIdGenerator#nextValue may cause block id conflicts. Contributed by Jing Zhao.

2015-05-04 Thread zhz
HDFS-8228. Erasure Coding: SequentialBlockGroupIdGenerator#nextValue may cause 
block id conflicts. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/395e29bb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/395e29bb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/395e29bb

Branch: refs/heads/HDFS-7285
Commit: 395e29bb0bf4f46722a0e5cea0bffba64052edde
Parents: 254759d
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 24 09:30:38 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:30 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 ++
 .../SequentialBlockGroupIdGenerator.java| 39 +++---
 .../SequentialBlockIdGenerator.java |  2 +-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  | 57 +++-
 .../server/namenode/TestAddStripedBlocks.java   | 21 
 5 files changed, 77 insertions(+), 45 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/395e29bb/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9357e23..cf41a9b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -128,3 +128,6 @@
 
 HDFS-8223. Should calculate checksum for parity blocks in 
DFSStripedOutputStream.
 (Yi Liu via jing9)
+
+HDFS-8228. Erasure Coding: SequentialBlockGroupIdGenerator#nextValue may 
cause 
+block id conflicts (Jing Zhao via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/395e29bb/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockGroupIdGenerator.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockGroupIdGenerator.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockGroupIdGenerator.java
index e9e22ee..de8e379 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockGroupIdGenerator.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/SequentialBlockGroupIdGenerator.java
@@ -19,9 +19,11 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.util.SequentialNumber;
 
+import static 
org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_GROUP_INDEX_MASK;
+import static 
org.apache.hadoop.hdfs.protocol.HdfsConstants.MAX_BLOCKS_IN_GROUP;
+
 /**
  * Generate the next valid block group ID by incrementing the maximum block
  * group ID allocated so far, with the first 2^10 block group IDs reserved.
@@ -34,6 +36,9 @@ import org.apache.hadoop.util.SequentialNumber;
  * bits (n+2) to (64-m) represent the ID of its block group, while the last m
  * bits represent its index of the group. The value m is determined by the
  * maximum number of blocks in a group (MAX_BLOCKS_IN_GROUP).
+ *
+ * Note that the {@link #nextValue()} methods requires external lock to
+ * guarantee IDs have no conflicts.
  */
 @InterfaceAudience.Private
 public class SequentialBlockGroupIdGenerator extends SequentialNumber {
@@ -47,32 +52,30 @@ public class SequentialBlockGroupIdGenerator extends 
SequentialNumber {
 
   @Override // NumberGenerator
   public long nextValue() {
-// Skip to next legitimate block group ID based on the naming protocol
-while (super.getCurrentValue() % HdfsConstants.MAX_BLOCKS_IN_GROUP > 0) {
-  super.nextValue();
-}
+skipTo((getCurrentValue() & ~BLOCK_GROUP_INDEX_MASK) + 
MAX_BLOCKS_IN_GROUP);
 // Make sure there's no conflict with existing random block IDs
-while (hasValidBlockInRange(super.getCurrentValue())) {
-  super.skipTo(super.getCurrentValue() +
-  HdfsConstants.MAX_BLOCKS_IN_GROUP);
+final Block b = new Block(getCurrentValue());
+while (hasValidBlockInRange(b)) {
+  skipTo(getCurrentValue() + MAX_BLOCKS_IN_GROUP);
+  b.setBlockId(getCurrentValue());
 }
-if (super.getCurrentValue() = 0) {
-  BlockManager.LOG.warn("All negative block group IDs are used, " +
-  "growing into positive IDs, " +
-  "which might conflict with non-erasure coded blocks.");
+if (b.getBlockId() >= 0) {
+  throw new IllegalStateException(All 

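The rewritten nextValue() leans on the ID layout from the class javadoc: the low m bits of a block ID index the block within its group, so clearing them and adding MAX_BLOCKS_IN_GROUP jumps straight to the next group-aligned ID. A standalone sketch with illustrative constants (the real ones live in HdfsConstants):

    // Illustrative: 4 index bits -> 16 blocks per group.
    static final long MAX_BLOCKS_IN_GROUP = 16;
    static final long BLOCK_GROUP_INDEX_MASK = MAX_BLOCKS_IN_GROUP - 1;

    static long nextGroupAlignedId(long current) {
      // Clear the block-index bits, then step one whole group forward.
      return (current & ~BLOCK_GROUP_INDEX_MASK) + MAX_BLOCKS_IN_GROUP;
    }
    // nextGroupAlignedId(0x23) == 0x30; nextGroupAlignedId(0x30) == 0x40
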
[28/50] hadoop git commit: HDFS-8188. Erasure coding: refactor client-related code to sync with HDFS-8082 and HDFS-8169. Contributed by Zhe Zhang.

2015-05-04 Thread zhz
HDFS-8188. Erasure coding: refactor client-related code to sync with HDFS-8082 
and HDFS-8169. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08b56e43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08b56e43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08b56e43

Branch: refs/heads/HDFS-7285
Commit: 08b56e435593579c8a99c252390467f9c8767c26
Parents: 569f875
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 20 14:19:12 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:27 2015 -0700

--
 .../hdfs/client/HdfsClientConfigKeys.java   | 12 
 .../hdfs/protocol/LocatedStripedBlock.java  | 64 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 21 ++
 .../hadoop/hdfs/client/impl/DfsClientConf.java  | 21 +-
 .../hdfs/protocol/LocatedStripedBlock.java  | 73 
 .../server/blockmanagement/BlockManager.java| 25 ---
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../server/namenode/TestStripedINodeFile.java   |  3 +-
 8 files changed, 120 insertions(+), 101 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08b56e43/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index 86c8a87..dc2f1d5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -176,6 +176,18 @@ public interface HdfsClientConfigKeys {
 int THREADPOOL_SIZE_DEFAULT = 0;
   }
 
+  /** dfs.client.read.striped configuration properties */
+  interface StripedRead {
+String PREFIX = Read.PREFIX + "striped.";
+
+String  THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
+/**
+ * With default 6+3 schema, each normal read could span 6 DNs. So this
+ * default value accommodates 3 read streams
+ */
+int THREADPOOL_SIZE_DEFAULT = 18;
+  }
+
   /** dfs.http.client configuration properties */
   interface HttpClient {
 String  PREFIX = "dfs.http.client.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08b56e43/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
new file mode 100644
index 000..93a5948
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.StorageType;
+
+import java.util.Arrays;
+
+/**
+ * {@link LocatedBlock} with striped block support. For a striped block, each
+ * datanode storage is associated with a block in the block group. We need to
+ * record the index (in the striped block group) for each of them.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class LocatedStripedBlock extends LocatedBlock {
+  private int[] blockIndices;
+
+  public LocatedStripedBlock(ExtendedBlock b, DatanodeInfo[] locs,
+  String[] storageIDs, StorageType[] storageTypes, int[] indices,
+  long startOffset, boolean corrupt, DatanodeInfo[] 

[30/50] hadoop git commit: HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for making it ready for transfer to DN (Contributed by Uma Maheswara Rao G)

2015-05-04 Thread zhz
HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for 
making it ready for transfer to DN (Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7858b54a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7858b54a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7858b54a

Branch: refs/heads/HDFS-7285
Commit: 7858b54a8d1741b68d5a75fbcabcdd6289a99077
Parents: 18a2e1e
Author: Vinayakumar B vinayakum...@apache.org
Authored: Sat Apr 18 23:20:45 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:27 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 137 ++-
 .../blockmanagement/DatanodeDescriptor.java |  31 +
 .../server/blockmanagement/DatanodeManager.java |   4 +-
 .../server/protocol/BlockECRecoveryCommand.java |  80 ++-
 .../hdfs/server/protocol/DatanodeProtocol.java  |   2 +-
 .../src/main/proto/DatanodeProtocol.proto   |   8 ++
 .../src/main/proto/erasurecoding.proto  |  13 ++
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  88 
 .../namenode/TestRecoverStripedBlocks.java  |  10 +-
 10 files changed, 335 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7858b54a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 0ed61cd..40517e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -87,3 +87,6 @@
 startup. (Hui Zheng via szetszwo)
 
 HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).
+
+HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for
+making it ready for transfer to DN (Uma Maheswara Rao G via vinayakumarb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7858b54a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 61a636b..5662a45 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -28,6 +28,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
@@ -100,7 +101,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTyp
 import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
+import org.apache.hadoop.hdfs.protocol.proto.*;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
@@ -121,6 +122,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmI
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECRecoveryCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
@@ -132,11 +134,11 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDele
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
 import 

[50/50] hadoop git commit: HDFS-8183. Erasure Coding: Improve DFSStripedOutputStream closing of datastreamer threads. Contributed by Rakesh R.

2015-05-04 Thread zhz
HDFS-8183. Erasure Coding: Improve DFSStripedOutputStream closing of 
datastreamer threads. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b076055
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b076055
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b076055

Branch: refs/heads/HDFS-7285
Commit: 3b076055a995ebeef7e1ece8b08053fb3e1c5a93
Parents: ef70904
Author: Zhe Zhang z...@apache.org
Authored: Thu Apr 30 00:13:32 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:32 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +++
 .../org/apache/hadoop/hdfs/DFSStripedOutputStream.java  | 12 ++--
 2 files changed, 13 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b076055/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index ca60487..3c75152 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -149,3 +149,6 @@
 
 HDFS-8282. Erasure coding: move striped reading logic to StripedBlockUtil.
 (Zhe Zhang)
+
+HDFS-8183. Erasure Coding: Improve DFSStripedOutputStream closing of 
+datastreamer threads. (Rakesh R via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b076055/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index c930187..5e2a534 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -331,18 +331,26 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
   // interrupt datastreamer if force is true
   @Override
   protected void closeThreads(boolean force) throws IOException {
+int index = 0;
+boolean exceptionOccurred = false;
 for (StripedDataStreamer streamer : streamers) {
   try {
 streamer.close(force);
 streamer.join();
 streamer.closeSocket();
-  } catch (InterruptedException e) {
-throw new IOException("Failed to shutdown streamer");
+  } catch (InterruptedException | IOException e) {
+DFSClient.LOG.error("Failed to shutdown streamer: name="
++ streamer.getName() + ", index=" + index + ", file=" + src, e);
+exceptionOccurred = true;
   } finally {
 streamer.setSocketToNull();
 setClosed();
+index++;
   }
 }
+if (exceptionOccurred) {
+  throw new IOException("Failed to shutdown streamer");
+}
   }
 
   /**



[03/50] hadoop git commit: HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin Updated CHANGES-HDFS-EC-7285.txt

2015-05-04 Thread zhz
HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/03ba1ab3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/03ba1ab3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/03ba1ab3

Branch: refs/heads/HDFS-7285
Commit: 03ba1ab3bfe8fd8d133864547c96e639b766a3fc
Parents: d37f00e
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 15:34:37 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:11:32 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/03ba1ab3/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 4e60a7c..3874cb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -48,9 +48,6 @@
 HDFS-7617. Add unit tests for editlog transactions for EC 
 (Hui Zheng via Zhe Zhang)
 
-HADOOP-11782. Correct two thrown messages in ECSchema class
-(Xinwei Qin via Kai Zheng)
-
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
 manage EC zones (Zhe Zhang)
 



[22/50] hadoop git commit: HDFS-8120. Erasure coding: created util class to analyze striped block groups. Contributed by Zhe Zhang and Li Bo.

2015-05-04 Thread zhz
HDFS-8120. Erasure coding: created util class to analyze striped block groups. 
Contributed by Zhe Zhang and Li Bo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4d9bfb8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4d9bfb8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4d9bfb8

Branch: refs/heads/HDFS-7285
Commit: e4d9bfb8b4ab584f0abd051ae4b854613d26705f
Parents: a969f86
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 12:59:27 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:25 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   4 +-
 .../hadoop/hdfs/DFSStripedInputStream.java  |  77 +++
 .../hadoop/hdfs/DFSStripedOutputStream.java |  34 +++--
 .../apache/hadoop/hdfs/StripedDataStreamer.java |  58 ++--
 .../server/blockmanagement/BlockManager.java|  26 +++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 138 +++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  91 +++-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  83 +--
 .../apache/hadoop/hdfs/TestReadStripedFile.java |  92 +++--
 .../server/namenode/TestAddStripedBlocks.java   | 107 ++
 .../namenode/TestRecoverStripedBlocks.java  |   3 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  | 125 +
 12 files changed, 562 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4d9bfb8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 9104f84..16250dd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1148,9 +1148,9 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 for (int i = 0; i < offsets.length; i++) {
   int nread = reader.readAll(buf, offsets[i], lengths[i]);
   updateReadStatistics(readStatistics, nread, reader);
-  if (nread != len) {
+  if (nread != lengths[i]) {
 throw new IOException("truncated return from reader.read(): " +
-"excpected " + len + ", got " + nread);
+"excpected " + lengths[i] + ", got " + nread);
   }
 }
 DFSClientFaultInjector.get().readFromDatanodeDelay();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4d9bfb8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 8a431b1..d597407 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -50,7 +51,7 @@ import java.util.concurrent.Future;
  *
 * |<- Striped Block Group ->|
 *  blk_0      blk_1       blk_2   <- A striped block group has
- *    |          |           |      {@link #groupSize} blocks
+ *    |          |           |      {@link #dataBlkNum} blocks
 *    v          v           v
 * +------+   +------+   +------+
 * |cell_0|   |cell_1|   |cell_2|  <- The logical read order should be
@@ -72,7 +73,7 @@ import java.util.concurrent.Future;
 public class DFSStripedInputStream extends DFSInputStream {
   /**
* This method plans the read portion from each block in the stripe
-   * @param groupSize The size / width of the striping group
+   * @param dataBlkNum The number of data blocks in the striping group
* @param cellSize The size of each striping cell
* @param startInBlk Starting offset in the striped block
* @param len Length of the read request
@@ -81,29 +82,29 @@ public class DFSStripedInputStream extends DFSInputStream {
* for an individual block 
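
The message is truncated above, but the layout the javadoc describes is simple: logical cells of cellSize bytes rotate round-robin across the dataBlkNum internal blocks of a group. A minimal standalone sketch of that mapping (ours, not the patch's planReadPortions implementation):

import java.util.Arrays;

public final class StripeReadPlanner {
  /** Bytes to read from each internal data block for [startInBlk, startInBlk + len). */
  public static long[] planReadLengths(int dataBlkNum, int cellSize,
      long startInBlk, int len) {
    long[] lengths = new long[dataBlkNum];
    long pos = startInBlk;
    long remaining = len;
    while (remaining > 0) {
      long cellIdx = pos / cellSize;               // position in the logical cell sequence
      int blkIdx = (int) (cellIdx % dataBlkNum);   // internal block holding that cell
      long offsetInCell = pos % cellSize;
      long take = Math.min(cellSize - offsetInCell, remaining);
      lengths[blkIdx] += take;
      pos += take;
      remaining -= take;
    }
    return lengths;
  }

  public static void main(String[] args) {
    // 6 data blocks, 64KB cells: a 200KB read from offset 0 spans cells 0-3.
    System.out.println(Arrays.toString(planReadLengths(6, 64 * 1024, 0, 200 * 1024)));
    // -> [65536, 65536, 65536, 8192, 0, 0]
  }
}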

[49/50] hadoop git commit: HDFS-7949. WebImageViewer needs to support file size calculation with striped blocks. Contributed by Rakesh R.

2015-05-04 Thread zhz
HDFS-7949. WebImageViewer needs to support file size calculation with striped 
blocks. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4fcea80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4fcea80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4fcea80

Branch: refs/heads/HDFS-7285
Commit: c4fcea80f0a5beadded14fb7efa6cf05f77f4031
Parents: 801a0ed
Author: Zhe Zhang z...@apache.org
Authored: Fri May 1 15:59:58 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:32 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../blockmanagement/BlockInfoStriped.java   |  27 +--
 .../tools/offlineImageViewer/FSImageLoader.java |  21 ++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  22 +++
 ...TestOfflineImageViewerWithStripedBlocks.java | 166 +++
 5 files changed, 212 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4fcea80/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 596bbcf..145494f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -155,3 +155,6 @@
 
 HDFS-8308. Erasure Coding: NameNode may get blocked in 
waitForLoadingFSImage()
 when loading editlog. (jing9)
+
+HDFS-7949. WebImageViewer need support file size calculation with striped 
+blocks. (Rakesh R via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4fcea80/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 23e3153..f0e52e3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -19,9 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
-
-import java.io.DataOutput;
-import java.io.IOException;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 
 import static 
org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
 
@@ -203,28 +201,9 @@ public class BlockInfoStriped extends BlockInfo {
 // In case striped blocks, total usage by this striped blocks should
 // be the total of data blocks and parity blocks because
 // `getNumBytes` is the total of actual data block size.
-
-// 0. Calculate the total bytes per stripes <Num Bytes per Stripes>
-long numBytesPerStripe = dataBlockNum * BLOCK_STRIPED_CELL_SIZE;
-if (getNumBytes() % numBytesPerStripe == 0) {
-  return getNumBytes() / dataBlockNum * getTotalBlockNum();
+return StripedBlockUtil.spaceConsumedByStripedBlock(getNumBytes(),
+dataBlockNum, parityBlockNum, BLOCK_STRIPED_CELL_SIZE);
 }
-// 1. Calculate the number of stripes in this block group. <Num Stripes>
-long numStripes = (getNumBytes() - 1) / numBytesPerStripe + 1;
-// 2. Calculate the parity cell length in the last stripe. Note that the
-//size of parity cells should equal the size of the first cell, if it
-//is not full. <Last Stripe Parity Cell Length>
-long lastStripeParityCellLen = Math.min(getNumBytes() % numBytesPerStripe,
-BLOCK_STRIPED_CELL_SIZE);
-// 3. Total consumed space is the total of
-// - The total of the full cells of data blocks and parity blocks.
-// - The remaining of data block which does not make a stripe.
-// - The last parity block cells. These size should be same
-//   to the first cell in this stripe.
-return getTotalBlockNum() * (BLOCK_STRIPED_CELL_SIZE * (numStripes - 1))
-+ getNumBytes() % numBytesPerStripe
-+ lastStripeParityCellLen * parityBlockNum;
-  }
 
   @Override
   public final boolean isStriped() {
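
The deleted lines above fully specify the arithmetic that moved into StripedBlockUtil. A hedged reconstruction of the extracted method (name and parameter order assumed from the new call site; body ported directly from the removed code):

// Ported from the inline code removed above; a sketch, not the file's text.
public static long spaceConsumedByStripedBlock(long numBytes,
    int dataBlkNum, int parityBlkNum, int cellSize) {
  long numBytesPerStripe = (long) dataBlkNum * cellSize;
  int totalBlkNum = dataBlkNum + parityBlkNum;
  if (numBytes % numBytesPerStripe == 0) {
    // Full stripes: each of the data + parity blocks holds numBytes/dataBlkNum.
    return numBytes / dataBlkNum * totalBlkNum;
  }
  long numStripes = (numBytes - 1) / numBytesPerStripe + 1;
  // Parity cells of the last, partial stripe match its first (largest) cell.
  long lastStripeParityCellLen = Math.min(numBytes % numBytesPerStripe, cellSize);
  return totalBlkNum * (cellSize * (numStripes - 1))
      + numBytes % numBytesPerStripe
      + lastStripeParityCellLen * parityBlkNum;
}

For example, with RS-6-3 and 64KB cells, a 100KB block group is one partial stripe: it consumes 102400 data bytes plus 3 * 65536 parity bytes, i.e. 299008 bytes in total.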

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4fcea80/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java
--
diff --git 

[27/50] hadoop git commit: HDFS-8166. DFSStripedOutputStream should not create empty blocks. Contributed by Jing Zhao.

2015-05-04 Thread zhz
HDFS-8166. DFSStripedOutputStream should not create empty blocks. Contributed 
by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/567105ef
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/567105ef
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/567105ef

Branch: refs/heads/HDFS-7285
Commit: 567105ef3f58c94aa55d607e4d1d150c772bb167
Parents: 647173e
Author: Jing Zhao ji...@apache.org
Authored: Fri Apr 17 17:55:19 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:26 2015 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 163 +++
 .../apache/hadoop/hdfs/StripedDataStreamer.java |  72 +++-
 .../server/blockmanagement/BlockManager.java|  17 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 162 +++---
 4 files changed, 236 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/567105ef/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index f11a657..7dc0091 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -22,10 +22,14 @@ import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -59,12 +63,12 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
*/
   private int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
   private ByteBuffer[] cellBuffers;
-  private final short blockGroupBlocks = HdfsConstants.NUM_DATA_BLOCKS
+  private final short numAllBlocks = HdfsConstants.NUM_DATA_BLOCKS
   + HdfsConstants.NUM_PARITY_BLOCKS;
-  private final short blockGroupDataBlocks = HdfsConstants.NUM_DATA_BLOCKS;
+  private final short numDataBlocks = HdfsConstants.NUM_DATA_BLOCKS;
   private int curIdx = 0;
   /* bytes written in current block group */
-  private long currentBlockGroupBytes = 0;
+  //private long currentBlockGroupBytes = 0;
 
   //TODO: Use ErasureCoder interface (HDFS-7781)
   private RawErasureEncoder encoder;
@@ -73,10 +77,6 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 return streamers.get(0);
   }
 
-  private long getBlockGroupSize() {
-return blockSize * HdfsConstants.NUM_DATA_BLOCKS;
-  }
-
   /** Construct a new output stream for creating a file. */
   DFSStripedOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
   EnumSet<CreateFlag> flag, Progressable progress,
@@ -84,15 +84,13 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
  throws IOException {
 super(dfsClient, src, stat, flag, progress, checksum, favoredNodes);
 DFSClient.LOG.info("Creating striped output stream");
-if (blockGroupBlocks <= 1) {
-  throw new IOException("The block group must contain more than one 
block.");
-}
+checkConfiguration();
 
-cellBuffers = new ByteBuffer[blockGroupBlocks];
+cellBuffers = new ByteBuffer[numAllBlocks];
 List<BlockingQueue<LocatedBlock>> stripeBlocks = new ArrayList<>();
 
-for (int i = 0; i < blockGroupBlocks; i++) {
-  stripeBlocks.add(new 
LinkedBlockingQueue<LocatedBlock>(blockGroupBlocks));
+for (int i = 0; i < numAllBlocks; i++) {
+  stripeBlocks.add(new LinkedBlockingQueue<LocatedBlock>(numAllBlocks));
   try {
 cellBuffers[i] = 
ByteBuffer.wrap(byteArrayManager.newByteArray(cellSize));
   } catch (InterruptedException ie) {
@@ -103,29 +101,38 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
   }
 }
 encoder = new RSRawEncoder();
-encoder.initialize(blockGroupDataBlocks,
-blockGroupBlocks - blockGroupDataBlocks, cellSize);
+encoder.initialize(numDataBlocks,
+numAllBlocks - numDataBlocks, cellSize);
 
-streamers = new ArrayList<>(blockGroupBlocks);
-for (short i = 0; i < blockGroupBlocks; i++) {
+List<StripedDataStreamer> s = new 

[33/50] hadoop git commit: HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema in FileSystemLinkResolver. Contributed by Tsz Wo Nicholas Sze.

2015-05-04 Thread zhz
HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema in 
FileSystemLinkResolver. Contributed by Tsz Wo Nicholas Sze.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/754e62a4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/754e62a4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/754e62a4

Branch: refs/heads/HDFS-7285
Commit: 754e62a4c1e4f4fc565efb23da0e263b03ccaffe
Parents: a0971b9
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 21 21:03:07 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:28 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  | 3 +++
 .../main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java   | 2 +-
 2 files changed, 4 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/754e62a4/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index d8f2e9d..3d86f05 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -110,3 +110,6 @@
 
 HDFS-8216. TestDFSStripedOutputStream should use BlockReaderTestUtil to 
 create BlockReader. (szetszwo via Zhe Zhang)
+
+HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema
+in FileSystemLinkResolver. (szetszwo via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/754e62a4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 4c8fff3..ede4f48 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -2281,7 +2281,7 @@ public class DistributedFileSystem extends FileSystem {
   @Override
   public Void doCall(final Path p) throws IOException,
   UnresolvedLinkException {
-dfs.createErasureCodingZone(getPathName(p), null);
+dfs.createErasureCodingZone(getPathName(p), schema);
 return null;
   }
 



[45/50] hadoop git commit: Fix merge conflicts.

2015-05-04 Thread zhz
Fix merge conflicts.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0023b10e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0023b10e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0023b10e

Branch: refs/heads/HDFS-7285
Commit: 0023b10e3369ab62e79005c7b30556fbb9cb627c
Parents: 4f53dc3
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 29 11:35:58 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:31 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSInputStream.java  |  7 +++
 .../apache/hadoop/hdfs/DFSStripedOutputStream.java   | 15 ---
 .../java/org/apache/hadoop/hdfs/DataStreamer.java|  1 -
 .../org/apache/hadoop/hdfs/StripedDataStreamer.java  |  7 ---
 4 files changed, 11 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0023b10e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 6eb25d0..bef4da0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1116,7 +1116,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   /**
* Read data from one DataNode.
* @param datanode the datanode from which to read data
-   * @param block the block to read
+   * @param blockStartOffset starting offset in the file
* @param startInBlk the startInBlk offset of the block
* @param endInBlk the endInBlk offset of the block
* @param buf the given byte array into which the data is read
@@ -1146,7 +1146,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   BlockReader reader = null;
   try {
 DFSClientFaultInjector.get().fetchFromDatanodeException();
-reader = getBlockReader(block, start, len, datanode.addr,
+reader = getBlockReader(block, startInBlk, len, datanode.addr,
 datanode.storageType, datanode.info);
 for (int i = 0; i < offsets.length; i++) {
   int nread = reader.readAll(buf, offsets[i], lengths[i]);
@@ -1203,8 +1203,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
* with each other.
*/
   private void checkReadPortions(int[] offsets, int[] lengths, int totalLen) {
-Preconditions.checkArgument(offsets.length == lengths.length &&
-offsets.length > 0);
+Preconditions.checkArgument(offsets.length == lengths.length && 
offsets.length > 0);
 int sum = 0;
 for (int i = 0; i < lengths.length; i++) {
   if (i > 0) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0023b10e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 6842267..c930187 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -124,10 +124,7 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
 for (short i = 0; i  numAllBlocks; i++) {
   StripedDataStreamer streamer = new StripedDataStreamer(stat, null,
   dfsClient, src, progress, checksum, cachingStrategy, 
byteArrayManager,
-  i, stripeBlocks);
-  if (favoredNodes != null && favoredNodes.length != 0) {
-streamer.setFavoredNodes(favoredNodes);
-  }
+  i, stripeBlocks, favoredNodes);
   s.add(streamer);
 }
 streamers = Collections.unmodifiableList(s);
@@ -316,7 +313,7 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
   return;
 }
 for (StripedDataStreamer streamer : streamers) {
-  streamer.setLastException(new IOException("Lease timeout of "
+  streamer.getLastException().set(new IOException("Lease timeout of "
  + (dfsClient.getConf().getHdfsTimeout()/1000) +
" seconds expired."));
 }
@@ -414,12 +411,8 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
   @Override
   protected synchronized void closeImpl() throws IOException {
 if (isClosed()) {
-  IOException e = getLeadingStreamer().getLastException().getAndSet(null);
-  if (e 

[32/50] hadoop git commit: HDFS-8024. Erasure Coding: ECworker frame, basics, bootstrapping and configuration. (Contributed by Uma Maheswara Rao G)

2015-05-04 Thread zhz
HDFS-8024. Erasure Coding: ECworker frame, basics, bootstrapping and 
configuration. (Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aefddb02
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aefddb02
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aefddb02

Branch: refs/heads/HDFS-7285
Commit: aefddb0212021b905d3dbe1d31a8c58e9b0213bc
Parents: 754e62a
Author: Uma Maheswara Rao G umamah...@apache.org
Authored: Wed Apr 22 19:30:14 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:28 2015 -0700

--
 .../erasurecode/coder/AbstractErasureCoder.java |  2 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  3 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  7 ++
 .../hdfs/server/datanode/BPOfferService.java|  6 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   | 10 +++
 .../erasurecode/ErasureCodingWorker.java| 83 
 .../src/main/proto/DatanodeProtocol.proto   |  2 +
 7 files changed, 112 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aefddb02/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
index e5bf11a..7403e35 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
@@ -66,7 +66,7 @@ public abstract class AbstractErasureCoder
* @param isEncoder
* @return raw coder
*/
-  protected static RawErasureCoder createRawCoder(Configuration conf,
+  public static RawErasureCoder createRawCoder(Configuration conf,
   String rawCoderFactoryKey, boolean isEncoder) {
 
 if (conf == null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aefddb02/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 3d86f05..1acde41 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -113,3 +113,6 @@
 
 HDFS-8212. DistributedFileSystem.createErasureCodingZone should pass schema
 in FileSystemLinkResolver. (szetszwo via Zhe Zhang)
+
+HDFS-8024. Erasure Coding: ECworker frame, basics, bootstraping and 
configuration.
+(umamahesh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aefddb02/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 5662a45..609d71a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -973,6 +973,8 @@ public class PBHelper {
   return REG_CMD;
 case BlockIdCommand:
   return PBHelper.convert(proto.getBlkIdCmd());
+case BlockECRecoveryCommand:
+  return PBHelper.convert(proto.getBlkECRecoveryCmd());
 default:
   return null;
 }
@@ -1123,6 +1125,11 @@ public class PBHelper {
   builder.setCmdType(DatanodeCommandProto.Type.BlockIdCommand).
 setBlkIdCmd(PBHelper.convert((BlockIdCommand) datanodeCommand));
   break;
+case DatanodeProtocol.DNA_ERASURE_CODING_RECOVERY:
+  builder.setCmdType(DatanodeCommandProto.Type.BlockECRecoveryCommand)
+  .setBlkECRecoveryCmd(
+  convert((BlockECRecoveryCommand) datanodeCommand));
+  break;
 case DatanodeProtocol.DNA_UNKNOWN: //Not expected
 default:
   builder.setCmdType(DatanodeCommandProto.Type.NullDatanodeCommand);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aefddb02/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java
--
diff --git 

[21/50] hadoop git commit: HDFS-7994. Detect if reserved EC Block ID is already used during namenode startup. Contributed by Hui Zheng

2015-05-04 Thread zhz
HDFS-7994. Detect if reserved EC Block ID is already used during namenode 
startup. Contributed by Hui Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4c3cd827
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4c3cd827
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4c3cd827

Branch: refs/heads/HDFS-7285
Commit: 4c3cd82708fec0ada8384d08b55090cf8fdd2588
Parents: e4d9bfb
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Apr 16 13:16:37 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:25 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   5 +-
 .../server/blockmanagement/BlockManager.java|  42 -
 .../hdfs/server/namenode/FSEditLogLoader.java   |   4 +-
 .../hdfs/server/namenode/FSImageFormat.java |   6 +-
 .../server/namenode/FSImageFormatPBINode.java   |   2 +-
 .../snapshot/FSImageFormatPBSnapshot.java   |   2 +-
 .../server/namenode/TestFSEditLogLoader.java| 106 
 .../hdfs/server/namenode/TestFSImage.java   | 169 ++-
 8 files changed, 321 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c3cd827/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index b9fc6fa..78ca6d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -81,4 +81,7 @@
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
 separate erasurecoding proto file (Rakesh R via vinayakumarb)
 
-HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
\ No newline at end of file
+HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
+
+HDFS-7994. Detect if resevered EC Block ID is already used during namenode
+startup. (Hui Zheng via szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4c3cd827/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 841ec92..4c3a007 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -273,6 +273,9 @@ public class BlockManager {
   /** Check whether name system is running before terminating */
   private boolean checkNSRunning = true;
 
+  /** Check whether there are any non-EC blocks using StripedID */
+  private boolean hasNonEcBlockUsingStripedID = false;
+
   public BlockManager(final Namesystem namesystem, final Configuration conf)
 throws IOException {
 this.namesystem = namesystem;
@@ -2913,6 +2916,24 @@ public class BlockManager {
   }
 
   /**
+   * Get the value of whether there are any non-EC blocks using StripedID.
+   *
+   * @return Returns the value of whether there are any non-EC blocks using 
StripedID.
+   */
+  public boolean hasNonEcBlockUsingStripedID(){
+return hasNonEcBlockUsingStripedID;
+  }
+
+  /**
+   * Set the value of whether there are any non-EC blocks using StripedID.
+   *
+   * @param has - the value of whether there are any non-EC blocks using 
StripedID.
+   */
+  public void hasNonEcBlockUsingStripedID(boolean has){
+hasNonEcBlockUsingStripedID = has;
+  }
+
+  /**
* Process a single possibly misreplicated block. This adds it to the
* appropriate queues if necessary, and returns a result code indicating
* what happened with it.
@@ -3508,8 +3529,10 @@ public class BlockManager {
 if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
   info = blocksMap.getStoredBlock(
   new Block(BlockIdManager.convertToStripedID(block.getBlockId())));
-}
-if (info == null) {
+  if ((info == null) && hasNonEcBlockUsingStripedID()){
+info = blocksMap.getStoredBlock(block);
+  }
+} else {
   info = blocksMap.getStoredBlock(block);
 }
 return info;
@@ -3683,6 +3706,21 @@ public class BlockManager {
 return blocksMap.addBlockCollection(block, bc);
   }
 
+  /**
+   * Do some check when adding a block to blocksmap.
+   * For HDFS-7994 to check whether the block is a NonEcBlockUsingStripedID.
+   *
+   */
+  public BlockInfo 
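
Read together, the getStoredBlock hunk above implements a two-step lookup. A standalone simulation of that ordering (toy types and the 4-bit group-index mask are assumptions for the demo, not HDFS classes):

import java.util.HashMap;
import java.util.Map;

public class StripedIdLookupDemo {
  static final long GROUP_INDEX_MASK = 15;            // assumed width of the in-group index

  static boolean isStripedBlockID(long id) { return id < 0; }   // striped IDs use the reserved negative range
  static long convertToStripedID(long id) { return id & ~GROUP_INDEX_MASK; }

  static Map<Long, String> blocksMap = new HashMap<>();
  static boolean hasNonEcBlockUsingStripedID = true;  // legacy block detected at startup

  static String getStoredBlock(long blockId) {
    if (isStripedBlockID(blockId)) {
      // First try the block group the ID would belong to.
      String info = blocksMap.get(convertToStripedID(blockId));
      if (info == null && hasNonEcBlockUsingStripedID) {
        // Fallback: a pre-EC block that happens to sit in the striped range.
        info = blocksMap.get(blockId);
      }
      return info;
    }
    return blocksMap.get(blockId);
  }

  public static void main(String[] args) {
    blocksMap.put(-16L, "blockGroup@-16");     // group ID, low 4 bits zero
    blocksMap.put(-35L, "legacyBlock@-35");    // legacy block in the striped range
    System.out.println(getStoredBlock(-13L));  // -13 & ~15 = -16 -> resolves to the group
    System.out.println(getStoredBlock(-35L));  // group -48 absent -> falls back to direct lookup
  }
}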

[15/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk changes (this commit mainly addresses HDFS-8081 and HDFS-8048). Contributed by Zhe Zhang.

2015-05-04 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk 
changes (this commit mainly addresses HDFS-8081 and HDFS-8048). Contributed by 
Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c9a57b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c9a57b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c9a57b0

Branch: refs/heads/HDFS-7285
Commit: 6c9a57b09218472ed6894403df20e1d4dab1df6e
Parents: 3d96ae6
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 10:56:24 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:23 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSInputStream.java |  4 ++--
 .../apache/hadoop/hdfs/DFSStripedInputStream.java   | 16 +---
 .../apache/hadoop/hdfs/DFSStripedOutputStream.java  |  3 ++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  5 +++--
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  3 ++-
 5 files changed, 18 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c9a57b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 79bbd54..9104f84 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1106,7 +1106,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   int offset, Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {
 final int length = (int) (end - start + 1);
-actualGetFromOneDataNode(datanode, block, start, end, buf,
+actualGetFromOneDataNode(datanode, blockStartOffset, start, end, buf,
 new int[]{offset}, new int[]{length}, corruptedBlockMap);
   }
 
@@ -1125,7 +1125,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
*  block replica
*/
   void actualGetFromOneDataNode(final DNAddrPair datanode,
-  LocatedBlock block, final long startInBlk, final long endInBlk,
+  long blockStartOffset, final long startInBlk, final long endInBlk,
   byte[] buf, int[] offsets, int[] lengths,
   Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c9a57b0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 077b0f8..8a431b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -224,7 +224,7 @@ public class DFSStripedInputStream extends DFSInputStream {
* Real implementation of pread.
*/
   @Override
-  protected void fetchBlockByteRange(LocatedBlock block, long start,
+  protected void fetchBlockByteRange(long blockStartOffset, long start,
   long end, byte[] buf, int offset,
   Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
   throws IOException {
@@ -234,7 +234,7 @@ public class DFSStripedInputStream extends DFSInputStream {
 int len = (int) (end - start + 1);
 
 // Refresh the striped block group
-block = getBlockGroupAt(block.getStartOffset());
+LocatedBlock block = getBlockGroupAt(blockStartOffset);
 assert block instanceof LocatedStripedBlock : "NameNode" +
  " should return a LocatedStripedBlock for a striped file";
 LocatedStripedBlock blockGroup = (LocatedStripedBlock) block;
@@ -254,9 +254,11 @@ public class DFSStripedInputStream extends DFSInputStream {
   DatanodeInfo loc = blks[i].getLocations()[0];
   StorageType type = blks[i].getStorageTypes()[0];
   DNAddrPair dnAddr = new DNAddrPair(loc, NetUtils.createSocketAddr(
-  loc.getXferAddr(dfsClient.getConf().connectToDnViaHostname)), type);
-  Callable<Void> readCallable = getFromOneDataNode(dnAddr, blks[i],
-  rp.startOffsetInBlock, rp.startOffsetInBlock + rp.readLength - 1, 
buf,
+  loc.getXferAddr(dfsClient.getConf().isConnectToDnViaHostname())),
+  type);
+  Callable<Void> readCallable = getFromOneDataNode(dnAddr,
+  

[40/50] hadoop git commit: HDFS-8189. ClientProtocol#createErasureCodingZone API was wrongly annotated as Idempotent (Contributed by Vinayakumar B)

2015-05-04 Thread zhz
HDFS-8189. ClientProtocol#createErasureCodingZone API was wrongly annotated as 
Idempotent (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91ff556c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91ff556c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91ff556c

Branch: refs/heads/HDFS-7285
Commit: 91ff556cc541e192f42d62a626d45eb280845833
Parents: f7064bb
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 28 14:24:17 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:30 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 -
 .../apache/hadoop/hdfs/protocol/ClientProtocol.java | 16 
 2 files changed, 12 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91ff556c/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index c28473b..6c5d7ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -136,4 +136,7 @@
 striped layout (Zhe Zhang)
 
 HDFS-8230. Erasure Coding: Ignore 
DatanodeProtocol#DNA_ERASURE_CODING_RECOVERY 
-commands from standbynode if any (vinayakumarb)
\ No newline at end of file
+commands from standbynode if any (vinayakumarb)
+
+HDFS-8189. ClientProtocol#createErasureCodingZone API was wrongly annotated
+as Idempotent (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91ff556c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index bba7697..76e2d12 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1364,14 +1364,6 @@ public interface ClientProtocol {
   long prevId) throws IOException;
 
   /**
-   * Create an erasure coding zone with specified schema, if any, otherwise
-   * default
-   */
-  @Idempotent
-  public void createErasureCodingZone(String src, ECSchema schema)
-  throws IOException;
-
-  /**
* Set xattr of a file or directory.
* The name must be prefixed with the namespace followed by ".". For example,
* "user.attr".
@@ -1467,6 +1459,14 @@ public interface ClientProtocol {
   public EventBatchList getEditsFromTxid(long txid) throws IOException;
 
   /**
+   * Create an erasure coding zone with specified schema, if any, otherwise
+   * default
+   */
+  @AtMostOnce
+  public void createErasureCodingZone(String src, ECSchema schema)
+  throws IOException;
+
+  /**
* Gets the ECInfo for the specified file/directory
* 
* @param src



[39/50] hadoop git commit: HDFS-8223. Should calculate checksum for parity blocks in DFSStripedOutputStream. Contributed by Yi Liu.

2015-05-04 Thread zhz
HDFS-8223. Should calculate checksum for parity blocks in 
DFSStripedOutputStream. Contributed by Yi Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/254759df
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/254759df
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/254759df

Branch: refs/heads/HDFS-7285
Commit: 254759dfdb59e0aa235fb3dac4be92d6baaea466
Parents: cdd1a78
Author: Jing Zhao ji...@apache.org
Authored: Thu Apr 23 15:48:21 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:29 2015 -0700

--
 .../main/java/org/apache/hadoop/fs/FSOutputSummer.java|  4 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  |  3 +++
 .../org/apache/hadoop/hdfs/DFSStripedOutputStream.java| 10 ++
 3 files changed, 17 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/254759df/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
index bdc5585..a8a7494 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FSOutputSummer.java
@@ -196,6 +196,10 @@ abstract public class FSOutputSummer extends OutputStream {
 return sum.getChecksumSize();
   }
 
+  protected DataChecksum getDataChecksum() {
+return sum;
+  }
+
   protected TraceScope createWriteTraceScope() {
 return NullScope.INSTANCE;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/254759df/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 48791b1..9357e23 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -125,3 +125,6 @@
 
 HDFS-8233. Fix DFSStripedOutputStream#getCurrentBlockGroupBytes when the 
last
 stripe is at the block group boundary. (jing9)
+
+HDFS-8223. Should calculate checksum for parity blocks in 
DFSStripedOutputStream.
+(Yi Liu via jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/254759df/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 245dfc1..6842267 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -62,6 +62,8 @@ public class DFSStripedOutputStream extends DFSOutputStream {
*/
   private final ECInfo ecInfo;
   private final int cellSize;
+  // checksum buffer, we only need to calculate checksum for parity blocks
+  private byte[] checksumBuf;
   private ByteBuffer[] cellBuffers;
 
   private final short numAllBlocks;
@@ -99,6 +101,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 
 checkConfiguration();
 
+checksumBuf = new byte[getChecksumSize() * (cellSize / bytesPerChecksum)];
 cellBuffers = new ByteBuffer[numAllBlocks];
 List<BlockingQueue<LocatedBlock>> stripeBlocks = new ArrayList<>();
 
@@ -179,6 +182,10 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
   private List<DFSPacket> generatePackets(ByteBuffer byteBuffer)
   throws IOException{
 List<DFSPacket> packets = new ArrayList<>();
+assert byteBuffer.hasArray();
+getDataChecksum().calculateChunkedSums(byteBuffer.array(), 0,
+byteBuffer.remaining(), checksumBuf, 0);
+int ckOff = 0;
 while (byteBuffer.remaining() > 0) {
   DFSPacket p = createPacket(packetSize, chunksPerPacket,
   streamer.getBytesCurBlock(),
@@ -186,6 +193,9 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
   int maxBytesToPacket = p.getMaxChunks() * bytesPerChecksum;
   int toWrite = byteBuffer.remaining() > maxBytesToPacket ?
   maxBytesToPacket: byteBuffer.remaining();
+  int ckLen = ((toWrite - 1) / bytesPerChecksum + 1) * getChecksumSize();
+  p.writeChecksum(checksumBuf, ckOff, ckLen);
+  ckOff += ckLen;
   p.writeData(byteBuffer, 
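
The checksum sizing in the hunk rounds each packet's payload up to whole chunks. A small self-contained check of that arithmetic, assuming HDFS's default 512-byte chunks and 4-byte CRC32C checksums:

import org.apache.hadoop.util.DataChecksum;

public class ParityChecksumSizing {
  public static void main(String[] args) {
    DataChecksum sum = DataChecksum.newDataChecksum(
        DataChecksum.Type.CRC32C, 512);            // bytesPerChecksum = 512
    int bytesPerChecksum = sum.getBytesPerChecksum();
    int checksumSize = sum.getChecksumSize();      // 4 bytes per chunk
    int toWrite = 1000;                            // payload bytes in one packet
    // Same rounding as the patch: ceil(toWrite / bytesPerChecksum) chunks.
    int ckLen = ((toWrite - 1) / bytesPerChecksum + 1) * checksumSize;
    System.out.println(ckLen);                     // 1000 bytes -> 2 chunks -> 8
  }
}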

[26/50] hadoop git commit: HADOOP-11841. Remove unused ecschema-def.xml files.

2015-05-04 Thread zhz
HADOOP-11841. Remove unused ecschema-def.xml files.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/647173e8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/647173e8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/647173e8

Branch: refs/heads/HDFS-7285
Commit: 647173e855719b804b04ba96562cb221c549abd5
Parents: d825289
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Fri Apr 17 16:07:07 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:26 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  2 ++
 .../src/main/conf/ecschema-def.xml  | 35 ---
 .../hadoop/fs/CommonConfigurationKeys.java  |  5 ---
 .../hadoop/io/erasurecode/SchemaLoader.java | 36 +++-
 .../hadoop/io/erasurecode/TestSchemaLoader.java | 12 ++-
 5 files changed, 25 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/647173e8/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index b850e11..9749270 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -42,3 +42,5 @@
 ( Kai Zheng via vinayakumarb )
   
 HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via 
Kai Zheng)
+
+HADOOP-11841. Remove unused ecschema-def.xml files.  (szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/647173e8/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
deleted file mode 100644
index e36d386..000
--- a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
- 
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- "License"); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
--->
-
-<!--
-Please define your EC schemas here. Note, once these schemas are loaded
-and referenced by EC storage policies, any change to them will be ignored.
-You can modify and remove those not used yet, or add new ones.
--->
-
-<schemas>
-  <schema name="RS-10-4">
-    <k>10</k>
-    <m>4</m>
-    <codec>RS</codec>
-  </schema>
-</schemas>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/647173e8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 8a5211a..bd2a24b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -143,11 +143,6 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   /** Supported erasure codec classes */
   public static final String IO_ERASURECODE_CODECS_KEY = 
"io.erasurecode.codecs";
 
-  public static final String IO_ERASURECODE_SCHEMA_FILE_KEY =
-  "io.erasurecode.schema.file";
-  public static final String IO_ERASURECODE_SCHEMA_FILE_DEFAULT =
-  "ecschema-def.xml";
-
   /** Use XOR raw coder when possible for the RS codec */
   public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
   "io.erasurecode.codec.rs.usexor";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/647173e8/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff 

[16/50] hadoop git commit: HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all ECSchemas loaded in Namenode. (Contributed by Vinayakumar B)

2015-05-04 Thread zhz
HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all ECSchemas 
loaded in Namenode. (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3d96ae6a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3d96ae6a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3d96ae6a

Branch: refs/heads/HDFS-7285
Commit: 3d96ae6a1a4e291108c222afab93ab8b4c59c5e7
Parents: 16d6f9a
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Apr 10 15:07:32 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:23 2015 -0700

--
 .../apache/hadoop/io/erasurecode/ECSchema.java  |  4 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 11 
 .../hadoop/hdfs/protocol/ClientProtocol.java| 10 
 ...tNamenodeProtocolServerSideTranslatorPB.java | 19 +++
 .../ClientNamenodeProtocolTranslatorPB.java | 26 -
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  5 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |  3 +-
 .../org/apache/hadoop/hdfs/TestECSchemas.java   | 57 
 12 files changed, 164 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96ae6a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 8c3310e..32077f6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -123,12 +123,12 @@ public final class ECSchema {
 
 this.chunkSize = DEFAULT_CHUNK_SIZE;
 try {
-  if (options.containsKey(CHUNK_SIZE_KEY)) {
+  if (this.options.containsKey(CHUNK_SIZE_KEY)) {
 this.chunkSize = Integer.parseInt(options.get(CHUNK_SIZE_KEY));
   }
 } catch (NumberFormatException e) {
   throw new IllegalArgumentException("Option value " +
-  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+  this.options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
" is found. It should be an integer");
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96ae6a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 753795a..5250dfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -58,4 +58,7 @@
 
 HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
 
-HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
\ No newline at end of file
+HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
+
+HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
+ECSchemas loaded in Namenode. (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3d96ae6a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 8415ac8..197e664 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -164,6 +164,7 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -3111,6 +3112,16 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  public ECSchema[] getECSchemas() throws 

[17/50] hadoop git commit: HDFS-7889 Subclass DFSOutputStream to support writing striping layout files. Contributed by Li Bo

2015-05-04 Thread zhz
HDFS-7889 Subclass DFSOutputStream to support writing striping layout files. 
Contributed by Li Bo


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16d6f9ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16d6f9ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16d6f9ac

Branch: refs/heads/HDFS-7285
Commit: 16d6f9ac9dfc3c6292421ca45f3e9bc796b57299
Parents: 41128e9
Author: Kai Zheng kai.zh...@intel.com
Authored: Sat Apr 11 01:03:37 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:23 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  13 +-
 .../java/org/apache/hadoop/hdfs/DFSPacket.java  |  26 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java | 439 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java|  11 +-
 .../apache/hadoop/hdfs/StripedDataStreamer.java | 241 ++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 311 +
 7 files changed, 1031 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16d6f9ac/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 1e695c4..753795a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -56,4 +56,6 @@
 
 HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
 
-HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
\ No newline at end of file
+HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
+
+HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16d6f9ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index ae5d3eb..0280d71 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -266,8 +266,14 @@ public class DFSOutputStream extends FSOutputSummer
 }
   }
   Preconditions.checkNotNull(stat, "HdfsFileStatus should not be null!");
-  final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat,
-  flag, progress, checksum, favoredNodes);
+  final DFSOutputStream out;
+  if(stat.getReplication() == 0) {
+out = new DFSStripedOutputStream(dfsClient, src, stat,
+flag, progress, checksum, favoredNodes);
+  } else {
+out = new DFSOutputStream(dfsClient, src, stat,
+flag, progress, checksum, favoredNodes);
+  }
   out.start();
   return out;
 } finally {
@@ -347,6 +353,9 @@ public class DFSOutputStream extends FSOutputSummer
   String[] favoredNodes) throws IOException {
 TraceScope scope =
 dfsClient.getPathTraceScope("newStreamForAppend", src);
+   if(stat.getReplication() == 0) {
+  throw new IOException("Not support appending to a striping layout file 
yet.");
+}
 try {
   final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
   progress, lastBlock, stat, checksum, favoredNodes);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16d6f9ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
index 22055c3..9cd1ec1 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
 
@@ -113,6 +114,19 @@ class DFSPacket {
 dataPos += len;
   }
 
+  

[46/50] hadoop git commit: HDFS-8272. Erasure Coding: simplify the retry logic in DFSStripedInputStream (stateful read). Contributed by Jing Zhao

2015-05-04 Thread zhz
HDFS-8272. Erasure Coding: simplify the retry logic in DFSStripedInputStream 
(stateful read). Contributed by Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aae54522
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aae54522
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aae54522

Branch: refs/heads/HDFS-7285
Commit: aae54522e29e774e923bc1c62a78a432bd5fabb3
Parents: 0023b10
Author: Zhe Zhang z...@apache.org
Authored: Wed Apr 29 15:53:31 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:31 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/DFSStripedInputStream.java  | 336 ---
 2 files changed, 150 insertions(+), 189 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aae54522/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9b4bf24..6a9bdee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -143,3 +143,6 @@
 
 HDFS-8235. Erasure Coding: Create DFSStripedInputStream in DFSClient#open.
 (Kai Sasaki via jing9)
+
+HDFS-8272. Erasure Coding: simplify the retry logic in 
DFSStripedInputStream 
+(stateful read). (Jing Zhao via Zhe Zhang)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aae54522/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index f6f7ed2..3da7306 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -22,11 +22,8 @@ import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.*;
 import 
org.apache.hadoop.hdfs.protocol.datatransfer.InvalidEncryptionKeyException;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.token.Token;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
 import org.apache.htrace.TraceScope;
@@ -126,23 +123,42 @@ public class DFSStripedInputStream extends DFSInputStream 
{
 return results;
   }
 
+  private static class ReaderRetryPolicy {
+private int fetchEncryptionKeyTimes = 1;
+private int fetchTokenTimes = 1;
+
+void refetchEncryptionKey() {
+  fetchEncryptionKeyTimes--;
+}
+
+void refetchToken() {
+  fetchTokenTimes--;
+}
+
+boolean shouldRefetchEncryptionKey() {
+  return fetchEncryptionKeyTimes > 0;
+}
+
+boolean shouldRefetchToken() {
+  return fetchTokenTimes > 0;
+}
+  }
+
   private final short groupSize = HdfsConstants.NUM_DATA_BLOCKS;
-  private BlockReader[] blockReaders = null;
-  private DatanodeInfo[] currentNodes = null;
+  private final BlockReader[] blockReaders = new BlockReader[groupSize];
+  private final DatanodeInfo[] currentNodes = new DatanodeInfo[groupSize];
   private final int cellSize;
   private final short dataBlkNum;
   private final short parityBlkNum;
-  private final ECInfo ecInfo;
 
-  DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum, ECInfo info)
-  throws IOException {
+  DFSStripedInputStream(DFSClient dfsClient, String src, boolean 
verifyChecksum,
+  ECInfo ecInfo) throws IOException {
 super(dfsClient, src, verifyChecksum);
 // ECInfo is restored from NN just before reading striped file.
-assert info != null;
-ecInfo = info;
+assert ecInfo != null;
 cellSize = ecInfo.getSchema().getChunkSize();
-dataBlkNum = (short)ecInfo.getSchema().getNumDataUnits();
-parityBlkNum = (short)ecInfo.getSchema().getNumParityUnits();
+dataBlkNum = (short) ecInfo.getSchema().getNumDataUnits();
+parityBlkNum = (short) ecInfo.getSchema().getNumParityUnits();
 DFSClient.LOG.debug("Creating an striped input stream for file " + src);
   }
 
@@ -162,9 +178,7 @@ public class DFSStripedInputStream extends DFSInputStream {
* When seeking into a new block group, create blockReader for each internal

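The ReaderRetryPolicy above replaces scattered retry flags with a per-stream,
one-shot budget for each recoverable failure (stale encryption key, expired
block token): the counter starts at 1, shouldRefetch*() gates the retry, and
refetch*() burns the budget. A self-contained sketch of the same pattern, with
illustrative names rather than the HDFS types:

  // RetryBudget.java -- one-shot retry budget per failure class (sketch).
  public class RetryBudget {
    private int remaining = 1;                 // allow exactly one refetch

    boolean shouldRetry() { return remaining > 0; }
    void consume() { remaining--; }

    public static void main(String[] args) {
      RetryBudget tokenRetry = new RetryBudget();
      for (int attempt = 0; ; attempt++) {
        boolean failed = (attempt == 0);       // simulate one expired-token failure
        if (!failed) {
          System.out.println("read ok on attempt " + attempt);
          break;
        }
        if (!tokenRetry.shouldRetry()) {
          throw new RuntimeException("out of retries");
        }
        tokenRetry.consume();                  // refetch the token once, then loop
      }
    }
  }
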
[20/50] hadoop git commit: HDFS-8114. Erasure coding: Add auditlog FSNamesystem#createErasureCodingZone if this operation fails. Contributed by Rakesh R.

2015-05-04 Thread zhz
HDFS-8114. Erasure coding: Add auditlog FSNamesystem#createErasureCodingZone if 
this operation fails. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5e5022e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5e5022e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5e5022e

Branch: refs/heads/HDFS-7285
Commit: f5e5022ee0470731138e5d00d00c314b9c2a5809
Parents: c743e89
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 11:15:02 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:13:24 2015 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  | 21 ++--
 1 file changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5e5022e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index a3ae795..5232d64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8170,11 +8170,19 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   SafeModeException, AccessControlException {
 String src = srcArg;
 HdfsFileStatus resultingStat = null;
-checkSuperuserPrivilege();
-checkOperation(OperationCategory.WRITE);
-final byte[][] pathComponents =
-FSDirectory.getPathComponentsForReservedPath(src);
-FSPermissionChecker pc = getPermissionChecker();
+FSPermissionChecker pc = null;
+byte[][] pathComponents = null;
+boolean success = false;
+try {
+  checkSuperuserPrivilege();
+  checkOperation(OperationCategory.WRITE);
+  pathComponents =
+  FSDirectory.getPathComponentsForReservedPath(src);
+  pc = getPermissionChecker();
+} catch (Throwable e) {
+  logAuditEvent(success, "createErasureCodingZone", srcArg);
+  throw e;
+}
 writeLock();
 try {
   checkSuperuserPrivilege();
@@ -8188,11 +8196,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
   final INodesInPath iip = dir.getINodesInPath4Write(src, false);
   resultingStat = dir.getAuditFileInfo(iip);
+  success = true;
 } finally {
   writeUnlock();
 }
 getEditLog().logSync();
-logAuditEvent(true, "createErasureCodingZone", srcArg, null, 
resultingStat);
+logAuditEvent(success, "createErasureCodingZone", srcArg, null, 
resultingStat);
   }
 
   /**



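The FSNamesystem change above makes the audit log cover the failure path of
createErasureCodingZone as well: the privilege and operation checks are
wrapped so a failure is audited with success=false before the exception
propagates, and the final log call reports the success flag instead of a
hard-coded true. A simplified try/finally sketch of that log-exactly-once idea
(the patch itself uses catch-and-rethrow plus a trailing log call; names here
are illustrative, not the FSNamesystem API):

  // AuditedOp.java -- audit an operation exactly once, pass or fail (sketch).
  public class AuditedOp {
    static void audit(boolean success, String cmd, String src) {
      System.out.println("audit: cmd=" + cmd + " src=" + src + " success=" + success);
    }

    static void createZone(String src, boolean allowed) {
      boolean success = false;
      try {
        if (!allowed) {                        // stands in for checkSuperuserPrivilege()
          throw new SecurityException("superuser privilege required");
        }
        // ... perform the actual operation here ...
        success = true;
      } finally {
        audit(success, "createErasureCodingZone", src);
      }
    }

    public static void main(String[] args) {
      createZone("/ec", true);                 // audited with success=true
      try {
        createZone("/ec", false);              // audited with success=false
      } catch (SecurityException e) {
        // already audited in the finally block
      }
    }
  }
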
[06/50] hadoop git commit: HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by Zhe Zhang and Jing Zhao

2015-05-04 Thread zhz
HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by 
Zhe Zhang and Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb386505
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb386505
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb386505

Branch: refs/heads/HDFS-7285
Commit: fb386505a86d739e034664b0b58236ed9dded08c
Parents: c284ac0
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 7 11:20:13 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon May 4 10:11:40 2015 -0700

--
 .../hadoop/hdfs/protocol/LocatedBlock.java  |   4 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  55 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  80 +++-
 .../hadoop/hdfs/DFSStripedInputStream.java  | 367 +++
 .../hdfs/protocol/LocatedStripedBlock.java  |   5 +
 .../blockmanagement/BlockInfoStriped.java   |   6 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  92 -
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 304 +++
 .../namenode/TestRecoverStripedBlocks.java  |  88 +
 10 files changed, 896 insertions(+), 113 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb386505/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 4e8f202..a9596bf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -203,4 +203,8 @@ public class LocatedBlock {
 + "; locs=" + Arrays.asList(locs)
 + "}";
   }
+
+  public boolean isStriped() {
+return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb386505/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 09ffd95..5ad043b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -237,6 +237,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
   new DFSHedgedReadMetrics();
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
+  private static volatile ThreadPoolExecutor STRIPED_READ_THREAD_POOL;
  private final Sampler<?> traceSampler;
 
   public DfsClientConf getConf() {
@@ -373,6 +374,19 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (dfsClientConf.getHedgedReadThreadpoolSize() > 0) {
   
this.initThreadsNumForHedgedReads(dfsClientConf.getHedgedReadThreadpoolSize());
 }
+numThreads = conf.getInt(
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE,
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+if (numThreads <= 0) {
+  LOG.warn("The value of "
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE
+  + " must be greater than 0. The current setting is " + numThreads
+  + ". Reset it to the default value "
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+  numThreads =
+  DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE;
+}
+this.initThreadsNumForStripedReads(numThreads);
 this.saslClient = new SaslDataTransferClient(
   conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
   TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
@@ -3153,11 +3167,52 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
  LOG.debug("Using hedged reads; pool threads=" + num);
 }
   }
+  
+  /**
+   * Create thread pool for parallel reading in striped layout,
+   * STRIPED_READ_THREAD_POOL, if it does not already exist.
+   * @param num Number of threads for striped reads thread pool.
+   */
+  private void initThreadsNumForStripedReads(int num) {
+assert num > 0;
+if (STRIPED_READ_THREAD_POOL != null) {
+  return;
+}
+synchronized (DFSClient.class) {
+

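The DFSClient excerpt above first validates the configured striped-read pool
size (falling back to the default when the value is not positive), then
initializes a process-wide pool guarded by a class-level synchronized block
over a volatile static field, so only the first client to arrive builds it. A
sketch of that lazy double-checked initialization; the pool parameters below
are illustrative, not the DFSClient defaults:

  // StripedReadPool.java -- lazy, process-wide thread pool init (sketch).
  import java.util.concurrent.SynchronousQueue;
  import java.util.concurrent.ThreadPoolExecutor;
  import java.util.concurrent.TimeUnit;

  public class StripedReadPool {
    private static volatile ThreadPoolExecutor POOL;

    static ThreadPoolExecutor get(int numThreads) {
      if (POOL == null) {                      // fast path, no locking
        synchronized (StripedReadPool.class) {
          if (POOL == null) {                  // re-check under the lock
            POOL = new ThreadPoolExecutor(1, numThreads, 60, TimeUnit.SECONDS,
                new SynchronousQueue<Runnable>());
            POOL.allowCoreThreadTimeOut(true);
          }
        }
      }
      return POOL;                             // later callers reuse the first pool
    }

    public static void main(String[] args) {
      System.out.println("pool max = " + get(18).getMaximumPoolSize());
    }
  }
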
[Hadoop Wiki] Update of 2015MayBugBash by SomeOtherAccount

2015-05-04 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The 2015MayBugBash page has been changed by SomeOtherAccount:
https://wiki.apache.org/hadoop/2015MayBugBash?action=diff&rev1=3&rev2=4

  TableOfContents(4)
  
+ 
+ {{{#!wiki red/solid
+ IMPORTANT NOTE: Apache has limited QA capabilities. It is extremely important 
that everyone avoids submitting patches to Jenkins for testing unless they are 
absolutely certain that all relevant tests will pass.  HDFS unit tests, for 
example, will tie up a test slot for '''over two hours'''.
+ }}}
+ 
  == Information ==
- 
- With over 900 patches not yet reviewed and approved for Apache Hadoop, it's 
time to make some strong progress on the bug list! 
+ With over 900 patches not yet reviewed and approved for Apache Hadoop, it's 
time to make some strong progress on the bug list!
  
  A number of Apache Hadoop committers and Hadoop-related tech companies are 
hosting an Apache Hadoop Community event on Friday, May 8th, after HBaseCon 
2015. You are hereby invited for a fun, daylong event devoted to identifying 
and registering important patches and cleaning up the queue.
  
@@ -18, +22 @@

   1. Hang out on the #hadoop channel on irc.freenode.net .
  
  == Procedures for that day ==
- 
  === Source Code Contributions ===
- 
   Non-committers 
- 
   1. Read through the 
[[https://issues.apache.org/jira/issues/?filter=12331694|To Be Reviewed queue]].
   1. Find a JIRA to work on.
   1. Remove the '''BB2015-05-TBR''' label from that JIRA so that it leaves the 
queue.
-  1. Work through the issues in that JIRA. Work make sure the patch applies 
cleanly, test pasts locally by running against test-patch.sh, any pre-existing 
committer comments are covered, etc.
-  1. When you think it is ready, set the label to '''BB2015-05-RFC'''.
+  1. Work through the issues in that JIRA:
+   1. Make sure the patch applies cleanly
+   1. Fix any pre-existing comments
+  1. '''Before uploading''', test the patch locally using {{{test-patch.sh}}} 
to prevent overloading the QA servers.
+  1. Upload the reworked patch back into JIRA.
+  1. Set the label to '''BB2015-05-RFC'''.
  
  Note that in some cases, the functionality in older patches may already 
exist.  Please close these JIRA, preferably as a duplicate to the JIRA that 
added that functionality or as Invalid with a comment stating that you believe 
the issue is stale and already fixed.
  
   Committers 
- 
   1. Read through the 
[[https://issues.apache.org/jira/issues/?filter=12331695|Ready For Committer 
queue]].
   1. Find a JIRA to work on.
   1. Remove the '''BB2015-05-RFC''' label from that JIRA so that it leaves the 
queue.
   1. Review the patch.  If it needs more work, add a comment and add the 
'''BB2015-05-TBR''' label so that it goes back into the non-committer queue.
   1. Commit the patch as per usual if it is ready to go.
- 
  
  === Non-source Code Contributions ===
  The vast majority of Hadoop content, including almost all of the 
documentation is part of the source tree.  However, there are multiple ways in 
which those who are unfamiliar with developer environments can contribute:


hadoop git commit: HDFS-8316. Erasure coding: refactor EC constants to be consistent with HDFS-8249. Contributed by Zhe Zhang.

2015-05-04 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 c4fcea80f -> 35bb92fe2


HDFS-8316. Erasure coding: refactor EC constants to be consistent with 
HDFS-8249. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35bb92fe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35bb92fe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35bb92fe

Branch: refs/heads/HDFS-7285
Commit: 35bb92fe2afda92a5ba41d48d41aac6723a141b3
Parents: c4fcea8
Author: Jing Zhao ji...@apache.org
Authored: Mon May 4 11:24:35 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon May 4 11:24:35 2015 -0700

--
 .../org/apache/hadoop/hdfs/protocol/HdfsConstants.java   | 11 +++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt |  3 +++
 .../org/apache/hadoop/hdfs/DFSStripedOutputStream.java   |  2 +-
 .../hdfs/server/blockmanagement/BlockIdManager.java  |  4 ++--
 .../blockmanagement/SequentialBlockGroupIdGenerator.java |  4 ++--
 .../hadoop/hdfs/server/common/HdfsServerConstants.java   |  5 -
 .../hdfs/server/namenode/TestAddStripedBlocks.java   |  4 ++--
 .../hdfs/server/namenode/TestStripedINodeFile.java   |  6 +++---
 8 files changed, 28 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35bb92fe/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 58c7ea1..32ca81c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -75,6 +75,17 @@ public final class HdfsConstants {
   public static final String CLIENT_NAMENODE_PROTOCOL_NAME =
   "org.apache.hadoop.hdfs.protocol.ClientProtocol";
 
+  /*
+   * These values correspond to the values used by the system default erasure
+   * coding schema.
+   * TODO: to be removed once all places use schema.
+   */
+
+  public static final byte NUM_DATA_BLOCKS = 6;
+  public static final byte NUM_PARITY_BLOCKS = 3;
+  // The chunk size for striped block which is used by erasure coding
+  public static final int BLOCK_STRIPED_CELL_SIZE = 256 * 1024;
+
   // SafeMode actions
   public enum SafeModeAction {
 SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35bb92fe/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 145494f..e30b2ed 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -158,3 +158,6 @@
 
 HDFS-7949. WebImageViewer need support file size calculation with striped 
 blocks. (Rakesh R via Zhe Zhang)
+
+HDFS-8316. Erasure coding: refactor EC constants to be consistent with 
HDFS-8249.
+(Zhe Zhang via jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35bb92fe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index 5e2a534..71cdbb9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -419,7 +419,7 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
   @Override
   protected synchronized void closeImpl() throws IOException {
 if (isClosed()) {
-  getLeadingStreamer().getLastException().check();
+  getLeadingStreamer().getLastException().check(true);
   return;
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35bb92fe/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 

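The constants above pin the system default schema at 6 data blocks, 3 parity
blocks, and a 256 KiB cell. The arithmetic they imply, worked through in a
small sketch (the derived quantities are computed here for illustration; only
the three constants come from the patch):

  // EcGeometry.java -- sizes implied by the default EC schema constants.
  public class EcGeometry {
    static final int NUM_DATA_BLOCKS = 6;
    static final int NUM_PARITY_BLOCKS = 3;
    static final int BLOCK_STRIPED_CELL_SIZE = 256 * 1024;

    public static void main(String[] args) {
      int blocksInGroup = NUM_DATA_BLOCKS + NUM_PARITY_BLOCKS;
      long dataPerStripe = (long) NUM_DATA_BLOCKS * BLOCK_STRIPED_CELL_SIZE;
      long rawPerStripe = (long) blocksInGroup * BLOCK_STRIPED_CELL_SIZE;
      System.out.println("internal blocks per group: " + blocksInGroup);  // 9
      System.out.println("user bytes per full stripe: " + dataPerStripe); // 1572864
      System.out.println("raw bytes per full stripe:  " + rawPerStripe);  // 2359296
      System.out.println("storage overhead: "
          + (double) rawPerStripe / dataPerStripe);                       // 1.5
    }
  }
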
[1/2] hadoop git commit: HDFS-8290. WebHDFS calls before namesystem initialization can cause NullPointerException. Contributed by Chris Nauroth.

2015-05-04 Thread cnauroth
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 99b49a818 -> a43080a2c
  refs/heads/trunk 8f65c793f -> c4578760b


HDFS-8290. WebHDFS calls before namesystem initialization can cause 
NullPointerException. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4578760
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4578760
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4578760

Branch: refs/heads/trunk
Commit: c4578760b67d5b5169949a1b059f4472a268ff1b
Parents: 8f65c79
Author: cnauroth cnaur...@apache.org
Authored: Mon May 4 11:35:04 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon May 4 11:35:04 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../web/resources/NamenodeWebHdfsMethods.java   |  7 ++-
 .../web/resources/TestWebHdfsDataLocality.java  | 20 +++-
 3 files changed, 28 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4578760/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e525800..a365b86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -610,6 +610,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8309. Skip unit test using DataNodeTestUtils#injectDataDirFailure() 
on Windows.
 (xyao)
 
+HDFS-8290. WebHDFS calls before namesystem initialization can cause
+NullPointerException. (cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4578760/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 2c1148e..d33721c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -69,6 +69,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -164,7 +165,11 @@ public class NamenodeWebHdfsMethods {
   static DatanodeInfo chooseDatanode(final NameNode namenode,
   final String path, final HttpOpParam.Op op, final long openOffset,
   final long blocksize, final String excludeDatanodes) throws IOException {
-final BlockManager bm = namenode.getNamesystem().getBlockManager();
+FSNamesystem fsn = namenode.getNamesystem();
+if (fsn == null) {
+  throw new IOException("Namesystem has not been intialized yet.");
+}
+final BlockManager bm = fsn.getBlockManager();
 
 HashSet<Node> excludes = new HashSet<Node>();
 if (excludeDatanodes != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4578760/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
index 077361c..15e1c04 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.web.resources;
 
+import static org.mockito.Mockito.*;
+
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 
@@ -42,7 +45,9 @@ import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import 

[2/2] hadoop git commit: HDFS-8290. WebHDFS calls before namesystem initialization can cause NullPointerException. Contributed by Chris Nauroth.

2015-05-04 Thread cnauroth
HDFS-8290. WebHDFS calls before namesystem initialization can cause 
NullPointerException. Contributed by Chris Nauroth.

(cherry picked from commit c4578760b67d5b5169949a1b059f4472a268ff1b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a43080a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a43080a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a43080a2

Branch: refs/heads/branch-2
Commit: a43080a2cffe93ea2c3955e6ac01a59b2aba66e6
Parents: 99b49a8
Author: cnauroth cnaur...@apache.org
Authored: Mon May 4 11:35:04 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon May 4 11:35:18 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../web/resources/NamenodeWebHdfsMethods.java   |  7 ++-
 .../web/resources/TestWebHdfsDataLocality.java  | 20 +++-
 3 files changed, 28 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a43080a2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index aa47754..5738541 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -286,6 +286,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8309. Skip unit test using DataNodeTestUtils#injectDataDirFailure() 
on Windows.
 (xyao)
 
+HDFS-8290. WebHDFS calls before namesystem initialization can cause
+NullPointerException. (cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a43080a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 2c1148e..d33721c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -69,6 +69,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -164,7 +165,11 @@ public class NamenodeWebHdfsMethods {
   static DatanodeInfo chooseDatanode(final NameNode namenode,
   final String path, final HttpOpParam.Op op, final long openOffset,
   final long blocksize, final String excludeDatanodes) throws IOException {
-final BlockManager bm = namenode.getNamesystem().getBlockManager();
+FSNamesystem fsn = namenode.getNamesystem();
+if (fsn == null) {
+  throw new IOException("Namesystem has not been intialized yet.");
+}
+final BlockManager bm = fsn.getBlockManager();
 
 HashSet<Node> excludes = new HashSet<Node>();
 if (excludeDatanodes != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a43080a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
index 077361c..15e1c04 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.web.resources;
 
+import static org.mockito.Mockito.*;
+
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 
@@ -42,7 +45,9 @@ import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 

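Both copies of the HDFS-8290 patch above apply the same fix: fetch the
namesystem once and translate a null (a WebHDFS call arriving before namenode
initialization finishes) into a clean IOException, instead of letting an NPE
surface from getBlockManager(). A minimal sketch of that fail-fast guard;
NameNodeLike is a stand-in interface, not the real NameNode class:

  // NullGuard.java -- convert an uninitialized dependency into a clear error.
  import java.io.IOException;

  public class NullGuard {
    interface NameNodeLike { Object getNamesystem(); }

    static Object namesystemOf(NameNodeLike nn) throws IOException {
      Object fsn = nn.getNamesystem();
      if (fsn == null) {
        // WebHDFS can be invoked before the namesystem is ready
        throw new IOException("Namesystem has not been initialized yet.");
      }
      return fsn;                              // real code continues to getBlockManager()
    }

    public static void main(String[] args) {
      try {
        namesystemOf(() -> null);              // simulates a not-yet-initialized NameNode
      } catch (IOException e) {
        System.out.println("expected: " + e.getMessage());
      }
    }
  }
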
[Hadoop Wiki] Update of 2015MayBugBash by SomeOtherAccount

2015-05-04 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The 2015MayBugBash page has been changed by SomeOtherAccount:
https://wiki.apache.org/hadoop/2015MayBugBash?action=diff&rev1=4&rev2=5

   1. Find a JIRA to work on.
   1. Remove the '''BB2015-05-TBR''' label from that JIRA so that it leaves the 
queue.
   1. Work through the issues in that JIRA:
+   1. Name the patch file (something).patch
-   1. Make sure the patch applies cleanly
+   1. Verify the patch applies cleanly
1. Fix any pre-existing comments
-  1. '''Before uploading''', test the patch locally using {{{test-patch.sh}}} 
to prevent overloading the QA servers.
+   1. Test the patch locally using {{{test-patch.sh}}}
+  1. '''Before uploading''', did you run {{{test-patch.sh}}}?
   1. Upload the reworked patch back into JIRA.
   1. Set the label to '''BB2015-05-RFC'''.
  


hadoop git commit: MAPREDUCE-5649. Reduce cannot use more than 2G memory for the final merge. Contributed by Gera Shegalov

2015-05-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk c4578760b -> 7dc3c1203


MAPREDUCE-5649. Reduce cannot use more than 2G memory for the final merge. 
Contributed by Gera Shegalov


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dc3c120
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dc3c120
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dc3c120

Branch: refs/heads/trunk
Commit: 7dc3c1203d1ab14c09d0aaf0869a5bcdfafb0a5a
Parents: c457876
Author: Jason Lowe jl...@apache.org
Authored: Mon May 4 19:02:39 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon May 4 19:02:39 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../mapreduce/task/reduce/MergeManagerImpl.java | 47 +++-
 .../mapreduce/task/reduce/TestMergeManager.java | 29 
 3 files changed, 57 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dc3c120/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 19f95fc..f7e3bde 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -365,6 +365,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6349. Fix typo in property org.apache.hadoop.mapreduce.
 lib.chain.Chain.REDUCER_INPUT_VALUE_CLASS. (Ray Chiang via ozawa)
 
+MAPREDUCE-5649. Reduce cannot use more than 2G memory for the final merge
+(Gera Shegalov via jlowe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dc3c120/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
index 8bf17ef..f788707 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
@@ -93,8 +93,10 @@ public class MergeManagerImpl<K, V> implements 
MergeManager<K, V> {
   
  Set<CompressAwarePath> onDiskMapOutputs = new TreeSet<CompressAwarePath>();
   private final OnDiskMerger onDiskMerger;
-  
-  private final long memoryLimit;
+
+  @VisibleForTesting
+  final long memoryLimit;
+
   private long usedMemory;
   private long commitMemory;
   private final long maxSingleShuffleLimit;
@@ -167,11 +169,10 @@ public class MergeManagerImpl<K, V> implements 
MergeManager<K, V> {
 }
 
 // Allow unit tests to fix Runtime memory
-this.memoryLimit = 
-  (long)(jobConf.getLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,
-  Math.min(Runtime.getRuntime().maxMemory(), Integer.MAX_VALUE))
-* maxInMemCopyUse);
- 
+this.memoryLimit = (long)(jobConf.getLong(
+MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,
+Runtime.getRuntime().maxMemory()) * maxInMemCopyUse);
+
 this.ioSortFactor = jobConf.getInt(MRJobConfig.IO_SORT_FACTOR, 100);
 
 final float singleShuffleMemoryLimitPercent =
@@ -202,7 +203,7 @@ public class MergeManagerImpl<K, V> implements 
MergeManager<K, V> {
 
     if (this.maxSingleShuffleLimit >= this.mergeThreshold) {
       throw new RuntimeException("Invalid configuration: "
-          + "maxSingleShuffleLimit should be less than mergeThreshold"
+          + "maxSingleShuffleLimit should be less than mergeThreshold "
           + "maxSingleShuffleLimit: " + this.maxSingleShuffleLimit
           + "mergeThreshold: " + this.mergeThreshold);
 }
@@ -668,24 +669,26 @@ public class MergeManagerImpl<K, V> implements 
MergeManager<K, V> {
 }
   }
 
-  private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs,
-      List<InMemoryMapOutput<K,V>> inMemoryMapOutputs,
-      List<CompressAwarePath> onDiskMapOutputs) throws IOException {
-    LOG.info("finalMerge called with " +
-        inMemoryMapOutputs.size() + " in-memory map-outputs and " +
-        onDiskMapOutputs.size() + " on-disk map-outputs");
-
+  @VisibleForTesting
+  final long getMaxInMemReduceLimit() {
 final float maxRedPer =
-  

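The memoryLimit change above is the heart of MAPREDUCE-5649: the old default
clamped the JVM heap to Integer.MAX_VALUE before applying the in-memory
fraction, so reducers with large heaps were capped near 2 GiB for the final
merge; the fix passes the full Runtime.maxMemory() through. The before/after
arithmetic, with illustrative values (the 8 GiB heap and 0.9 fraction are
examples, not Hadoop defaults):

  // MergeLimit.java -- old vs. new final-merge memory budget (sketch).
  public class MergeLimit {
    public static void main(String[] args) {
      long heap = 8L * 1024 * 1024 * 1024;     // pretend an 8 GiB reducer heap
      float maxInMemCopyUse = 0.9f;            // shuffle input buffer fraction

      long oldLimit = (long) (Math.min(heap, Integer.MAX_VALUE) * maxInMemCopyUse);
      long newLimit = (long) (heap * maxInMemCopyUse);

      System.out.println("old limit: " + oldLimit); // ~1.9 GiB -- the 2G ceiling
      System.out.println("new limit: " + newLimit); // ~7.2 GiB
    }
  }
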
hadoop git commit: MAPREDUCE-5649. Reduce cannot use more than 2G memory for the final merge. Contributed by Gera Shegalov (cherry picked from commit 7dc3c1203d1ab14c09d0aaf0869a5bcdfafb0a5a)

2015-05-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a43080a2c -> 87c2d915f


MAPREDUCE-5649. Reduce cannot use more than 2G memory for the final merge. 
Contributed by Gera Shegalov
(cherry picked from commit 7dc3c1203d1ab14c09d0aaf0869a5bcdfafb0a5a)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87c2d915
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87c2d915
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87c2d915

Branch: refs/heads/branch-2
Commit: 87c2d915f1cc799cb4020c945c04d3ecb82ee963
Parents: a43080a
Author: Jason Lowe jl...@apache.org
Authored: Mon May 4 19:02:39 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon May 4 19:03:43 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../mapreduce/task/reduce/MergeManagerImpl.java | 47 +++-
 .../mapreduce/task/reduce/TestMergeManager.java | 29 
 3 files changed, 57 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c2d915/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 4ad39bc..a7893d6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -113,6 +113,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6349. Fix typo in property org.apache.hadoop.mapreduce.
 lib.chain.Chain.REDUCER_INPUT_VALUE_CLASS. (Ray Chiang via ozawa)
 
+MAPREDUCE-5649. Reduce cannot use more than 2G memory for the final merge
+(Gera Shegalov via jlowe)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/87c2d915/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
index 8bf17ef..f788707 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java
@@ -93,8 +93,10 @@ public class MergeManagerImpl<K, V> implements 
MergeManager<K, V> {
   
  Set<CompressAwarePath> onDiskMapOutputs = new TreeSet<CompressAwarePath>();
   private final OnDiskMerger onDiskMerger;
-  
-  private final long memoryLimit;
+
+  @VisibleForTesting
+  final long memoryLimit;
+
   private long usedMemory;
   private long commitMemory;
   private final long maxSingleShuffleLimit;
@@ -167,11 +169,10 @@ public class MergeManagerImpl<K, V> implements 
MergeManager<K, V> {
 }
 
 // Allow unit tests to fix Runtime memory
-this.memoryLimit = 
-  (long)(jobConf.getLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,
-  Math.min(Runtime.getRuntime().maxMemory(), Integer.MAX_VALUE))
-* maxInMemCopyUse);
- 
+this.memoryLimit = (long)(jobConf.getLong(
+MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES,
+Runtime.getRuntime().maxMemory()) * maxInMemCopyUse);
+
 this.ioSortFactor = jobConf.getInt(MRJobConfig.IO_SORT_FACTOR, 100);
 
 final float singleShuffleMemoryLimitPercent =
@@ -202,7 +203,7 @@ public class MergeManagerImpl<K, V> implements 
MergeManager<K, V> {
 
     if (this.maxSingleShuffleLimit >= this.mergeThreshold) {
       throw new RuntimeException("Invalid configuration: "
-          + "maxSingleShuffleLimit should be less than mergeThreshold"
+          + "maxSingleShuffleLimit should be less than mergeThreshold "
           + "maxSingleShuffleLimit: " + this.maxSingleShuffleLimit
           + "mergeThreshold: " + this.mergeThreshold);
 }
@@ -668,24 +669,26 @@ public class MergeManagerImpl<K, V> implements 
MergeManager<K, V> {
 }
   }
 
-  private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs,
-      List<InMemoryMapOutput<K,V>> inMemoryMapOutputs,
-      List<CompressAwarePath> onDiskMapOutputs) throws IOException {
-    LOG.info("finalMerge called with " +
-        inMemoryMapOutputs.size() + " in-memory map-outputs and " +
-        onDiskMapOutputs.size() + " on-disk map-outputs");
-
+  @VisibleForTesting
+  final long 

hadoop git commit: MAPREDUCE-6165. [JDK8] TestCombineFileInputFormat failed on JDK8. Contributed by Akira AJISAKA.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 f71c49c5e -> 4e96175b3


MAPREDUCE-6165. [JDK8] TestCombineFileInputFormat failed on JDK8. Contributed 
by Akira AJISAKA.

(cherry picked from commit 551615fa13f65ae996bae9c1bacff189539b6557)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e96175b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e96175b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e96175b

Branch: refs/heads/branch-2
Commit: 4e96175b334925466a9790e6ca20d0ec7d350791
Parents: f71c49c
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue May 5 10:23:13 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue May 5 10:23:25 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|3 +
 .../lib/input/CombineFileInputFormat.java   |   26 +-
 .../lib/input/TestCombineFileInputFormat.java   | 1138 --
 3 files changed, 805 insertions(+), 362 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e96175b/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 5d605a7..4944894 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -116,6 +116,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-5649. Reduce cannot use more than 2G memory for the final merge
 (Gera Shegalov via jlowe)
 
+MAPREDUCE-6165. [JDK8] TestCombineFileInputFormat failed on JDK8.
+(Akira AJISAKA via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e96175b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
index 040c54b..b2b7656 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
@@ -29,7 +29,6 @@ import java.util.HashMap;
 import java.util.Set;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -289,6 +288,26 @@ public abstract class CombineFileInputFormat<K, V>
  maxSize, minSizeNode, minSizeRack, splits);
   }
 
+  /**
+   * Process all the nodes and create splits that are local to a node.
+   * Generate one split per node iteration, and walk over nodes multiple times
+   * to distribute the splits across nodes.
+   * <p>
+   * Note: The order of processing the nodes is undetermined because the
+   * implementation of nodeToBlocks is {@link java.util.HashMap} and its order
+   * of the entries is undetermined.
+   * @param nodeToBlocks Mapping from a node to the list of blocks that
+   * it contains.
+   * @param blockToNodes Mapping from a block to the nodes on which
+   * it has replicas.
+   * @param rackToBlocks Mapping from a rack name to the list of blocks it has.
+   * @param totLength Total length of the input files.
+   * @param maxSize Max size of each split.
+   *If set to 0, disable smoothing load.
+   * @param minSizeNode Minimum split size per node.
+   * @param minSizeRack Minimum split size per rack.
+   * @param splits New splits created by this method are added to the list.
+   */
   @VisibleForTesting
   void createSplits(Map<String, Set<OneBlockInfo>> nodeToBlocks,
  Map<OneBlockInfo, String[]> blockToNodes,
@@ -309,11 +328,6 @@ public abstract class CombineFileInputFormat<K, V>
 Set<String> completedNodes = new HashSet<String>();
 
 while(true) {
-  // it is allowed for maxSize to be 0. Disable smoothing load for such 
cases
-
-  // process all nodes and create splits that are local to a node. Generate
-  // one split per node iteration, and walk over nodes multiple times to
-  // distribute the splits across nodes. 
   for (Iterator<Map.Entry<String, Set<OneBlockInfo>>> iter = nodeToBlocks
   .entrySet().iterator(); iter.hasNext();) {
 

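The new javadoc above records why the JDK8 test broke: nodeToBlocks is a
HashMap, whose iteration order is unspecified and changed between JDK7 and
JDK8, so any assertion about which node yields a split first is brittle. A
small demonstration (LinkedHashMap appears only to contrast a defined order;
the host names are made up):

  // MapOrder.java -- HashMap iteration order is not insertion order.
  import java.util.HashMap;
  import java.util.LinkedHashMap;
  import java.util.Map;

  public class MapOrder {
    public static void main(String[] args) {
      String[] nodes = {"host3", "host1", "host2"};
      Map<String, Integer> hash = new HashMap<String, Integer>();
      Map<String, Integer> linked = new LinkedHashMap<String, Integer>();
      for (String n : nodes) {
        hash.put(n, 1);
        linked.put(n, 1);
      }
      System.out.println("HashMap order (unspecified):     " + hash.keySet());
      System.out.println("LinkedHashMap order (insertion): " + linked.keySet());
    }
  }
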
hadoop git commit: MAPREDUCE-6165. [JDK8] TestCombineFileInputFormat failed on JDK8. Contributed by Akira AJISAKA.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk d701acc9c -> 551615fa1


MAPREDUCE-6165. [JDK8] TestCombineFileInputFormat failed on JDK8. Contributed 
by Akira AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/551615fa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/551615fa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/551615fa

Branch: refs/heads/trunk
Commit: 551615fa13f65ae996bae9c1bacff189539b6557
Parents: d701acc
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue May 5 10:23:13 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue May 5 10:23:13 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt|3 +
 .../lib/input/CombineFileInputFormat.java   |   26 +-
 .../lib/input/TestCombineFileInputFormat.java   | 1138 --
 3 files changed, 805 insertions(+), 362 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/551615fa/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 481757a..002fbe6 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -368,6 +368,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-5649. Reduce cannot use more than 2G memory for the final merge
 (Gera Shegalov via jlowe)
 
+MAPREDUCE-6165. [JDK8] TestCombineFileInputFormat failed on JDK8.
+(Akira AJISAKA via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/551615fa/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
index 040c54b..b2b7656 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/CombineFileInputFormat.java
@@ -29,7 +29,6 @@ import java.util.HashMap;
 import java.util.Set;
 import java.util.Iterator;
 import java.util.Map;
-import java.util.Map.Entry;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -289,6 +288,26 @@ public abstract class CombineFileInputFormat<K, V>
  maxSize, minSizeNode, minSizeRack, splits);
   }
 
+  /**
+   * Process all the nodes and create splits that are local to a node.
+   * Generate one split per node iteration, and walk over nodes multiple times
+   * to distribute the splits across nodes.
+   * <p>
+   * Note: The order of processing the nodes is undetermined because the
+   * implementation of nodeToBlocks is {@link java.util.HashMap} and its order
+   * of the entries is undetermined.
+   * @param nodeToBlocks Mapping from a node to the list of blocks that
+   * it contains.
+   * @param blockToNodes Mapping from a block to the nodes on which
+   * it has replicas.
+   * @param rackToBlocks Mapping from a rack name to the list of blocks it has.
+   * @param totLength Total length of the input files.
+   * @param maxSize Max size of each split.
+   *If set to 0, disable smoothing load.
+   * @param minSizeNode Minimum split size per node.
+   * @param minSizeRack Minimum split size per rack.
+   * @param splits New splits created by this method are added to the list.
+   */
   @VisibleForTesting
   void createSplits(Map<String, Set<OneBlockInfo>> nodeToBlocks,
  Map<OneBlockInfo, String[]> blockToNodes,
@@ -309,11 +328,6 @@ public abstract class CombineFileInputFormat<K, V>
 Set<String> completedNodes = new HashSet<String>();
 
 while(true) {
-  // it is allowed for maxSize to be 0. Disable smoothing load for such 
cases
-
-  // process all nodes and create splits that are local to a node. Generate
-  // one split per node iteration, and walk over nodes multiple times to
-  // distribute the splits across nodes. 
   for (Iterator<Map.Entry<String, Set<OneBlockInfo>>> iter = nodeToBlocks
   .entrySet().iterator(); iter.hasNext();) {
 Map.Entry<String, Set<OneBlockInfo>> one = iter.next();


hadoop git commit: HADOOP-11916. TestStringUtils#testLowerAndUpperStrings failed on MAC due to a JVM bug. Contributed by Ming Ma.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk 551615fa1 -> 338e88a19


HADOOP-11916. TestStringUtils#testLowerAndUpperStrings failed on MAC due to a 
JVM bug. Contributed by Ming Ma.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/338e88a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/338e88a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/338e88a1

Branch: refs/heads/trunk
Commit: 338e88a19eeb01364c7f5bcdc5f4b5c35d53852d
Parents: 551615f
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue May 5 12:39:24 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue May 5 12:39:24 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/test/java/org/apache/hadoop/util/TestStringUtils.java| 4 
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/338e88a1/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 2bf790a..0b3c971 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -596,6 +596,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-11900. Add failIfNoTests=false to hadoop-build-tools pom. (gera)
 
+HADOOP-11916. TestStringUtils#testLowerAndUpperStrings failed on MAC
+due to a JVM bug. (Ming Ma via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/338e88a1/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 515c3e0..5b0715f 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -37,6 +37,7 @@ import java.util.regex.Pattern;
 
 import org.apache.hadoop.test.UnitTestcaseTimeLimit;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
+import org.junit.Assume;
 import org.junit.Test;
 
 public class TestStringUtils extends UnitTestcaseTimeLimit {
@@ -416,6 +417,9 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
 
   @Test
   public void testLowerAndUpperStrings() {
+// Due to java bug 
http://bugs.java.com/bugdatabase/view_bug.do?bug_id=8047340,
+// The test will fail with Turkish locality on Mac OS.
+Assume.assumeTrue(Shell.LINUX);
 Locale defaultLocale = Locale.getDefault();
 try {
   Locale.setDefault(new Locale("tr", "TR"));


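The Assume guard above sidesteps a JVM bug on Mac OS, but the underlying trap
is general: under the tr_TR locale, case conversion of 'i'/'I' goes through
the Turkish dotted/dotless forms, so locale-sensitive toLowerCase/toUpperCase
breaks ASCII-only comparisons. A short demonstration:

  // TurkishI.java -- the Turkish-locale case-conversion trap.
  import java.util.Locale;

  public class TurkishI {
    public static void main(String[] args) {
      Locale tr = new Locale("tr", "TR");
      System.out.println("TITLE".toLowerCase(tr));          // tıtle (dotless i)
      System.out.println("title".toUpperCase(tr));          // TİTLE (dotted I)
      System.out.println("TITLE".toLowerCase(Locale.ROOT)); // title, locale-independent
    }
  }
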

hadoop git commit: HADOOP-11916. TestStringUtils#testLowerAndUpperStrings failed on MAC due to a JVM bug. Contributed by Ming Ma.

2015-05-04 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 4e96175b3 -> aefadb838


HADOOP-11916. TestStringUtils#testLowerAndUpperStrings failed on MAC due to a 
JVM bug. Contributed by Ming Ma.

(cherry picked from commit 338e88a19eeb01364c7f5bcdc5f4b5c35d53852d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/aefadb83
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/aefadb83
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/aefadb83

Branch: refs/heads/branch-2
Commit: aefadb838c57d2156aa541cc21182a08f1abaa39
Parents: 4e96175
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Tue May 5 12:39:24 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Tue May 5 12:39:43 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt  | 3 +++
 .../src/test/java/org/apache/hadoop/util/TestStringUtils.java| 4 
 2 files changed, 7 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/aefadb83/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 945c9db..fd5bb4b 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -141,6 +141,9 @@ Release 2.8.0 - UNRELEASED
 
 HADOOP-11900. Add failIfNoTests=false to hadoop-build-tools pom. (gera)
 
+HADOOP-11916. TestStringUtils#testLowerAndUpperStrings failed on MAC
+due to a JVM bug. (Ming Ma via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/aefadb83/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
index 515c3e0..5b0715f 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/util/TestStringUtils.java
@@ -37,6 +37,7 @@ import java.util.regex.Pattern;
 
 import org.apache.hadoop.test.UnitTestcaseTimeLimit;
 import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;
+import org.junit.Assume;
 import org.junit.Test;
 
 public class TestStringUtils extends UnitTestcaseTimeLimit {
@@ -416,6 +417,9 @@ public class TestStringUtils extends UnitTestcaseTimeLimit {
 
   @Test
   public void testLowerAndUpperStrings() {
+// Due to java bug 
http://bugs.java.com/bugdatabase/view_bug.do?bug_id=8047340,
+// The test will fail with Turkish locality on Mac OS.
+Assume.assumeTrue(Shell.LINUX);
 Locale defaultLocale = Locale.getDefault();
 try {
   Locale.setDefault(new Locale("tr", "TR"));



hadoop git commit: HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop (Contributed by Vinayakumar B) Reverted earlier commit

2015-05-04 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 338e88a19 -> 318081ccd


HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes 
for infinite loop (Contributed by Vinayakumar B)
Reverted earlier commit


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/318081cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/318081cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/318081cc

Branch: refs/heads/trunk
Commit: 318081ccd7af1ec02ec18f35ea95c579326be728
Parents: 338e88a
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue May 5 11:05:37 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue May 5 11:05:37 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 ---
 .../apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java | 4 
 2 files changed, 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/318081cc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 365b005..cd9b7b8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -664,9 +664,6 @@ Release 2.7.1 - UNRELEASED
 HDFS-8179. DFSClient#getServerDefaults returns null within 1
 hour of system start. (Xiaoyu Yao via Arpit Agarwal)
 
-HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
-goes for infinite loop (vinayakumarb)
-
 HDFS-8163. Using monotonicNow for block report scheduling causes
 test failures on recently restarted systems. (Arpit Agarwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/318081cc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
index 991b56d..7155eae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.ipc.StandbyException;
 
 /**
  * ReportBadBlockAction is an instruction issued by {{BPOfferService}} to
@@ -60,9 +59,6 @@ public class ReportBadBlockAction implements 
BPServiceActorAction {
 
 try {
   bpNamenode.reportBadBlocks(locatedBlock);
-} catch (StandbyException e) {
-  DataNode.LOG.warn("Failed to report bad block " + block
-  + " to standby namenode");
 } catch (IOException e) {
   throw new BPServiceActorActionException("Failed to report bad block "
   + block + " to namenode: ");



hadoop git commit: HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop (Contributed by Vinayakumar B) Reverted earlier commit

2015-05-04 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 e7b6a076d -> 69d837428


HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes 
for infinite loop (Contributed by Vinayakumar B)
Reverted earlier commit

(cherry picked from commit 318081ccd7af1ec02ec18f35ea95c579326be728)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69d83742
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69d83742
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69d83742

Branch: refs/heads/branch-2.7
Commit: 69d837428ca96a445cb662c8be06bf36e7034640
Parents: e7b6a07
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue May 5 11:05:37 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue May 5 11:07:59 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 ---
 .../apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java | 4 
 2 files changed, 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d83742/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 44571b4..2c209cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -44,9 +44,6 @@ Release 2.7.1 - UNRELEASED
 HDFS-8179. DFSClient#getServerDefaults returns null within 1
 hour of system start. (Xiaoyu Yao via Arpit Agarwal)
 
-HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
-goes for infinite loop (vinayakumarb)
-
 HDFS-8163. Using monotonicNow for block report scheduling causes
 test failures on recently restarted systems. (Arpit Agarwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/69d83742/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
index 991b56d..7155eae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.ipc.StandbyException;
 
 /**
  * ReportBadBlockAction is an instruction issued by {{BPOfferService}} to
@@ -60,9 +59,6 @@ public class ReportBadBlockAction implements 
BPServiceActorAction {
 
 try {
   bpNamenode.reportBadBlocks(locatedBlock);
-} catch (StandbyException e) {
-  DataNode.LOG.warn("Failed to report bad block " + block
-  + " to standby namenode");
 } catch (IOException e) {
   throw new BPServiceActorActionException("Failed to report bad block "
   + block + " to namenode: ");



hadoop git commit: HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop (Contributed by Vinayakumar B) Reverted earlier commit

2015-05-04 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 aefadb838 - 01bdfd794


HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes 
for infinite loop (Contributed by Vinayakumar B)
Reverted earlier commit

(cherry picked from commit 318081ccd7af1ec02ec18f35ea95c579326be728)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01bdfd79
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01bdfd79
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01bdfd79

Branch: refs/heads/branch-2
Commit: 01bdfd794cf460ae0a399649eaae54676d101214
Parents: aefadb8
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue May 5 11:05:37 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue May 5 11:06:44 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 ---
 .../apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java | 4 
 2 files changed, 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bdfd79/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a699a59..7b18365 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -340,9 +340,6 @@ Release 2.7.1 - UNRELEASED
 HDFS-8179. DFSClient#getServerDefaults returns null within 1
 hour of system start. (Xiaoyu Yao via Arpit Agarwal)
 
-HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
-goes for infinite loop (vinayakumarb)
-
 HDFS-8163. Using monotonicNow for block report scheduling causes
 test failures on recently restarted systems. (Arpit Agarwal)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/01bdfd79/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
index 991b56d..7155eae 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
@@ -26,7 +26,6 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
-import org.apache.hadoop.ipc.StandbyException;
 
 /**
  * ReportBadBlockAction is an instruction issued by {{BPOfferService}} to
@@ -60,9 +59,6 @@ public class ReportBadBlockAction implements 
BPServiceActorAction {
 
 try {
   bpNamenode.reportBadBlocks(locatedBlock);
-} catch (StandbyException e) {
-  DataNode.LOG.warn("Failed to report bad block " + block
-  + " to standby namenode");
 } catch (IOException e) {
   throw new BPServiceActorActionException("Failed to report bad block "
   + block + " to namenode: ");



hadoop git commit: MAPREDUCE-6259. IllegalArgumentException due to missing job submit time. Contributed by zhihai xu

2015-05-04 Thread jlowe
Repository: hadoop
Updated Branches:
  refs/heads/trunk 3fe79e1db - bf70c5ae2


MAPREDUCE-6259. IllegalArgumentException due to missing job submit time. 
Contributed by zhihai xu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf70c5ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf70c5ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf70c5ae

Branch: refs/heads/trunk
Commit: bf70c5ae2824a9139c1aa9d7c14020018881cec2
Parents: 3fe79e1
Author: Jason Lowe jl...@apache.org
Authored: Mon May 4 20:39:18 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon May 4 20:39:18 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../jobhistory/JobHistoryEventHandler.java  | 15 --
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  | 57 +---
 .../mapreduce/jobhistory/AMStartedEvent.java| 16 --
 5 files changed, 77 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf70c5ae/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index f7e3bde..481757a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -397,6 +397,9 @@ Release 2.7.1 - UNRELEASED
 MAPREDUCE-6339. Job history file is not flushed correctly because 
isTimerActive 
 flag is not set true when flushTimerTask is scheduled. (zhihai xu via 
devaraj)
 
+MAPREDUCE-6259. IllegalArgumentException due to missing job submit time
+(zhihai xu via jlowe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf70c5ae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 6b0ea79..bf32888 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -426,10 +426,10 @@ public class JobHistoryEventHandler extends 
AbstractService
* This should be the first call to history for a job
* 
* @param jobId the jobId.
-   * @param forcedJobStateOnShutDown
+   * @param amStartedEvent
* @throws IOException
*/
-  protected void setupEventWriter(JobId jobId, String forcedJobStateOnShutDown)
+  protected void setupEventWriter(JobId jobId, AMStartedEvent amStartedEvent)
   throws IOException {
 if (stagingDirPath == null) {
    LOG.error("Log Directory is null, returning");
@@ -489,8 +489,13 @@ public class JobHistoryEventHandler extends AbstractService
 }
 
 MetaInfo fi = new MetaInfo(historyFile, logDirConfPath, writer,
-user, jobName, jobId, forcedJobStateOnShutDown, queueName);
+user, jobName, jobId, amStartedEvent.getForcedJobStateOnShutDown(),
+queueName);
 fi.getJobSummary().setJobId(jobId);
+fi.getJobSummary().setJobLaunchTime(amStartedEvent.getStartTime());
+fi.getJobSummary().setJobSubmitTime(amStartedEvent.getSubmitTime());
+fi.getJobIndexInfo().setJobStartTime(amStartedEvent.getStartTime());
+fi.getJobIndexInfo().setSubmitTime(amStartedEvent.getSubmitTime());
 fileMap.put(jobId, fi);
   }
 
@@ -541,8 +546,7 @@ public class JobHistoryEventHandler extends AbstractService
 try {
   AMStartedEvent amStartedEvent =
   (AMStartedEvent) event.getHistoryEvent();
-  setupEventWriter(event.getJobID(),
-  amStartedEvent.getForcedJobStateOnShutDown());
+  setupEventWriter(event.getJobID(), amStartedEvent);
 } catch (IOException ioe) {
   LOG.error("Error JobHistoryEventHandler in handleEvent: " + event,
   ioe);
@@ -982,6 +986,7 @@ public class JobHistoryEventHandler extends AbstractService
 tEvent.addEventInfo(NODE_MANAGER_HTTP_PORT,
 ase.getNodeManagerHttpPort());
 tEvent.addEventInfo(START_TIME, ase.getStartTime());
+tEvent.addEventInfo(SUBMIT_TIME, ase.getSubmitTime());
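
The fix passes the whole AMStartedEvent into setupEventWriter() so that the
submit and start times it carries reach the job summary and index info, rather
than threading one field at a time through the signature. A standalone sketch
of that wiring pattern (Java 16+, simplified stand-in types, not the real MR
classes):

public class EventWiringSketch {
  // Stand-in for AMStartedEvent, reduced to the fields used here.
  record AmStartedEvent(long submitTime, long startTime, String forcedJobStateOnShutDown) {}

  // Before the fix the method took only forcedJobStateOnShutDown, so the
  // times never reached the writer; passing the event closes that gap.
  static void setupEventWriter(String jobId, AmStartedEvent event) {
    System.out.println(jobId + ": submitTime=" + event.submitTime()
        + " startTime=" + event.startTime());
  }

  public static void main(String[] args) {
    setupEventWriter("job_0001", new AmStartedEvent(1000L, 1200L, null));
  }
}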
  

hadoop git commit: HDFS-8281. Erasure Coding: implement parallel stateful reading for striped layout. Contributed by Jing Zhao.

2015-05-04 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 35bb92fe2 - 799c0f67f


HDFS-8281. Erasure Coding: implement parallel stateful reading for striped 
layout. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/799c0f67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/799c0f67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/799c0f67

Branch: refs/heads/HDFS-7285
Commit: 799c0f67faa4c29db97288c280d277c247ffa772
Parents: 35bb92f
Author: Jing Zhao ji...@apache.org
Authored: Mon May 4 14:44:58 2015 -0700
Committer: Jing Zhao ji...@apache.org
Committed: Mon May 4 14:44:58 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  26 +++
 .../hadoop/hdfs/DFSStripedInputStream.java  | 217 +--
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  34 ++-
 .../hadoop/hdfs/TestDFSStripedInputStream.java  |  50 -
 .../hadoop/hdfs/TestPlanReadPortions.java   |   4 +-
 6 files changed, 246 insertions(+), 88 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/799c0f67/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index e30b2ed..77272e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -161,3 +161,6 @@
 
 HDFS-8316. Erasure coding: refactor EC constants to be consistent with 
HDFS-8249.
 (Zhe Zhang via jing9)
+
+HDFS-8281. Erasure Coding: implement parallel stateful reading for striped 
layout.
+(jing9)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/799c0f67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index bef4da0..ca799fa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -716,6 +716,16 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   interface ReaderStrategy {
 public int doRead(BlockReader blockReader, int off, int len)
 throws ChecksumException, IOException;
+
+/**
+ * Copy data from the src ByteBuffer into the read buffer.
+ * @param src The src buffer where the data is copied from
+ * @param offset Useful only when the ReadStrategy is based on a byte 
array.
+ *   Indicate the offset of the byte array for copy.
+ * @param length Useful only when the ReadStrategy is based on a byte 
array.
+ *   Indicate the length of the data to copy.
+ */
+public int copyFrom(ByteBuffer src, int offset, int length);
   }
 
   protected void updateReadStatistics(ReadStatistics readStatistics,
@@ -749,6 +759,13 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   updateReadStatistics(readStatistics, nRead, blockReader);
   return nRead;
 }
+
+@Override
+public int copyFrom(ByteBuffer src, int offset, int length) {
+  ByteBuffer writeSlice = src.duplicate();
+  writeSlice.get(buf, offset, length);
+  return length;
+}
   }
 
   /**
@@ -782,6 +799,15 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
 }
   } 
 }
+
+@Override
+public int copyFrom(ByteBuffer src, int offset, int length) {
+  ByteBuffer writeSlice = src.duplicate();
+  int remaining = Math.min(buf.remaining(), writeSlice.remaining());
+  writeSlice.limit(writeSlice.position() + remaining);
+  buf.put(writeSlice);
+  return remaining;
+}
   }
 
   /* This is a used by regular read() and handles ChecksumExceptions.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/799c0f67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 0dc98fd..13c4743 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
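
The copyFrom() implementations above lean on ByteBuffer.duplicate(): the
duplicate shares the underlying bytes but keeps its own position and limit, so
the copy never disturbs the source buffer's cursor. A self-contained
illustration of the same bounded-copy technique:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

public class CopyFromSketch {
  // Duplicate the source, cap the duplicate's limit, drain it into dst.
  static int copyInto(ByteBuffer src, ByteBuffer dst) {
    ByteBuffer slice = src.duplicate();               // independent cursor
    int n = Math.min(dst.remaining(), slice.remaining());
    slice.limit(slice.position() + n);                // copy at most n bytes
    dst.put(slice);
    return n;
  }

  public static void main(String[] args) {
    ByteBuffer src = ByteBuffer.wrap("striped-read".getBytes(StandardCharsets.UTF_8));
    ByteBuffer dst = ByteBuffer.allocate(7);
    int copied = copyInto(src, dst);
    // Prints copied=7 and src.position()=0: the source cursor is untouched.
    System.out.println("copied=" + copied + ", src.position()=" + src.position());
  }
}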

[2/2] hadoop git commit: HDFS-8237. Move all protocol classes used by ClientProtocol to hdfs-client. Contributed by Haohui Mai.

2015-05-04 Thread wheat9
HDFS-8237. Move all protocol classes used by ClientProtocol to hdfs-client. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db0bd6dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db0bd6dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db0bd6dc

Branch: refs/heads/branch-2
Commit: db0bd6dca837eca60e866f00711576a502087f64
Parents: 81f128f
Author: Haohui Mai whe...@apache.org
Authored: Mon May 4 15:00:29 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon May 4 15:02:49 2015 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   3 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  85 +
 .../hdfs/client/HdfsClientConfigKeys.java   |   1 +
 .../hdfs/protocol/CacheDirectiveEntry.java  |  45 +++
 .../hdfs/protocol/CacheDirectiveInfo.java   | 358 +++
 .../hdfs/protocol/CacheDirectiveStats.java  | 169 +
 .../hadoop/hdfs/protocol/CachePoolEntry.java|  45 +++
 .../hadoop/hdfs/protocol/CachePoolInfo.java | 229 
 .../hadoop/hdfs/protocol/CachePoolStats.java| 115 ++
 .../hdfs/protocol/SnapshotDiffReport.java   | 215 +++
 .../protocol/SnapshottableDirectoryStatus.java  | 218 +++
 .../delegation/DelegationTokenSelector.java |  74 
 .../namenode/NotReplicatedYetException.java |  37 ++
 .../hdfs/server/protocol/DatanodeStorage.java   | 126 +++
 .../server/protocol/DatanodeStorageReport.java  |  42 +++
 .../hdfs/server/protocol/StorageReport.java |  66 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  75 +---
 .../hdfs/protocol/CacheDirectiveEntry.java  |  45 ---
 .../hdfs/protocol/CacheDirectiveInfo.java   | 358 ---
 .../hdfs/protocol/CacheDirectiveStats.java  | 169 -
 .../hadoop/hdfs/protocol/CachePoolEntry.java|  45 ---
 .../hadoop/hdfs/protocol/CachePoolInfo.java | 229 
 .../hadoop/hdfs/protocol/CachePoolStats.java| 115 --
 .../hdfs/protocol/SnapshotDiffReport.java   | 215 ---
 .../protocol/SnapshottableDirectoryStatus.java  | 218 ---
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  10 +-
 .../delegation/DelegationTokenSelector.java |  74 
 .../hadoop/hdfs/server/namenode/INode.java  |   3 +-
 .../namenode/NotReplicatedYetException.java |  37 --
 .../namenode/snapshot/SnapshotManager.java  |   3 +-
 .../hdfs/server/protocol/DatanodeStorage.java   | 126 ---
 .../server/protocol/DatanodeStorageReport.java  |  42 ---
 .../hdfs/server/protocol/StorageReport.java |  66 
 34 files changed, 1842 insertions(+), 1819 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db0bd6dc/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 7aade70..be2911f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -11,6 +11,9 @@
   <Class name="org.apache.hadoop.hdfs.protocol.DirectoryListing"/>
   <Class name="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
   <Class name="org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey"/>
+  <Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport$DiffReportEntry"/>
+  <Class name="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus"/>
+  <Class name="org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport"/>
 </Or>
 <Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
   </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db0bd6dc/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 97d3408..eda135e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Maps;
+import com.google.common.primitives.SignedBytes;
 import org.apache.commons.io.Charsets;
 import 

[2/2] hadoop git commit: HDFS-8237. Move all protocol classes used by ClientProtocol to hdfs-client. Contributed by Haohui Mai.

2015-05-04 Thread wheat9
HDFS-8237. Move all protocol classes used by ClientProtocol to hdfs-client. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0d6aa5d6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0d6aa5d6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0d6aa5d6

Branch: refs/heads/trunk
Commit: 0d6aa5d60948a7966da0ca1c3344a37c1d32f2e9
Parents: bf70c5a
Author: Haohui Mai whe...@apache.org
Authored: Mon May 4 15:00:29 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon May 4 15:00:29 2015 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   3 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  85 +
 .../hdfs/client/HdfsClientConfigKeys.java   |   1 +
 .../hdfs/protocol/CacheDirectiveEntry.java  |  45 +++
 .../hdfs/protocol/CacheDirectiveInfo.java   | 358 +++
 .../hdfs/protocol/CacheDirectiveStats.java  | 169 +
 .../hadoop/hdfs/protocol/CachePoolEntry.java|  45 +++
 .../hadoop/hdfs/protocol/CachePoolInfo.java | 229 
 .../hadoop/hdfs/protocol/CachePoolStats.java| 115 ++
 .../hdfs/protocol/SnapshotDiffReport.java   | 215 +++
 .../protocol/SnapshottableDirectoryStatus.java  | 218 +++
 .../delegation/DelegationTokenSelector.java |  74 
 .../namenode/NotReplicatedYetException.java |  37 ++
 .../hdfs/server/protocol/DatanodeStorage.java   | 126 +++
 .../server/protocol/DatanodeStorageReport.java  |  42 +++
 .../hdfs/server/protocol/StorageReport.java |  66 
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  75 +---
 .../hdfs/protocol/CacheDirectiveEntry.java  |  45 ---
 .../hdfs/protocol/CacheDirectiveInfo.java   | 358 ---
 .../hdfs/protocol/CacheDirectiveStats.java  | 169 -
 .../hadoop/hdfs/protocol/CachePoolEntry.java|  45 ---
 .../hadoop/hdfs/protocol/CachePoolInfo.java | 229 
 .../hadoop/hdfs/protocol/CachePoolStats.java| 115 --
 .../hdfs/protocol/SnapshotDiffReport.java   | 215 ---
 .../protocol/SnapshottableDirectoryStatus.java  | 218 ---
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  10 +-
 .../delegation/DelegationTokenSelector.java |  74 
 .../hadoop/hdfs/server/namenode/INode.java  |   3 +-
 .../namenode/NotReplicatedYetException.java |  37 --
 .../namenode/snapshot/SnapshotManager.java  |   3 +-
 .../hdfs/server/protocol/DatanodeStorage.java   | 126 ---
 .../server/protocol/DatanodeStorageReport.java  |  42 ---
 .../hdfs/server/protocol/StorageReport.java |  66 
 34 files changed, 1842 insertions(+), 1819 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6aa5d6/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 7aade70..be2911f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -11,6 +11,9 @@
   <Class name="org.apache.hadoop.hdfs.protocol.DirectoryListing"/>
   <Class name="org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier"/>
   <Class name="org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey"/>
+  <Class name="org.apache.hadoop.hdfs.protocol.SnapshotDiffReport$DiffReportEntry"/>
+  <Class name="org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus"/>
+  <Class name="org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport"/>
 </Or>
 <Bug pattern="EI_EXPOSE_REP,EI_EXPOSE_REP2" />
   </Match>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6aa5d6/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
index 97d3408..eda135e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs;
 
 import com.google.common.base.Joiner;
 import com.google.common.collect.Maps;
+import com.google.common.primitives.SignedBytes;
 import org.apache.commons.io.Charsets;
 import 

hadoop git commit: YARN-3375. NodeHealthScriptRunner.shouldRun() check is performing 3 times for starting NodeHealthScriptRunner (Devaraj K via wangda)

2015-05-04 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 02e650248 - 4e77ee2f6


YARN-3375. NodeHealthScriptRunner.shouldRun() check is performing 3 times for 
starting NodeHealthScriptRunner (Devaraj K via wangda)

(cherry picked from commit 71f4de220c74bf2c90630bd0442979d92380d304)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4e77ee2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4e77ee2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4e77ee2f

Branch: refs/heads/branch-2
Commit: 4e77ee2f6f1c16caa8a0d9f0604166e49a51f0ed
Parents: 02e6502
Author: Wangda Tan wan...@apache.org
Authored: Mon May 4 15:49:19 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Mon May 4 16:02:57 2015 -0700

--
 .../java/org/apache/hadoop/util/NodeHealthScriptRunner.java  | 8 
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/server/nodemanager/NodeHealthCheckerService.java| 4 +---
 .../apache/hadoop/yarn/server/nodemanager/NodeManager.java   | 4 +++-
 4 files changed, 7 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e77ee2f/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
index 568ad5b..fc392c4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
@@ -214,11 +214,6 @@ public class NodeHealthScriptRunner extends 
AbstractService {
*/
   @Override
   protected void serviceStart() throws Exception {
-// if health script path is not configured don't start the thread.
-if (!shouldRun(nodeHealthScript)) {
-  LOG.info("Not starting node health monitor");
-  return;
-}
 nodeHealthScriptScheduler = new Timer("NodeHealthMonitor-Timer", true);
 // Start the timer task immediately and
 // then periodically at interval time.
@@ -232,9 +227,6 @@ public class NodeHealthScriptRunner extends AbstractService 
{
*/
   @Override
   protected void serviceStop() {
-if (!shouldRun(nodeHealthScript)) {
-  return;
-}
 if (nodeHealthScriptScheduler != null) {
   nodeHealthScriptScheduler.cancel();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e77ee2f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 8f77854..4bf3f51 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -254,6 +254,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3097. Logging of resource recovery on NM restart has redundancies
 (Eric Payne via jlowe)
 
+YARN-3375. NodeHealthScriptRunner.shouldRun() check is performing 3 times 
for 
+starting NodeHealthScriptRunner. (Devaraj K via wangda)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4e77ee2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
index 02b318a..c1a159a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.yarn.server.nodemanager;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.NodeHealthScriptRunner;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 /**
  * The class which provides functionality of checking the health of the node 
and
@@ -44,8 +43,7 @@ public class NodeHealthCheckerService extends 
CompositeService {
 
   @Override
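
Taken together with the NodeManager change in this patch, the shouldRun()
check now happens once, before the service is wired up, and a null check on
the timer covers the never-started case in serviceStop(). A minimal lifecycle
sketch under those assumptions (simplified, not the real AbstractService API):

import java.util.Timer;

public class HealthMonitorLifecycleSketch {
  private Timer nodeHealthScriptScheduler;

  // The caller has already decided the health script should run.
  void serviceStart() {
    nodeHealthScriptScheduler = new Timer("NodeHealthMonitor-Timer", true);
  }

  // One null guard replaces the repeated shouldRun() checks.
  void serviceStop() {
    if (nodeHealthScriptScheduler != null) {
      nodeHealthScriptScheduler.cancel();
    }
  }

  public static void main(String[] args) {
    HealthMonitorLifecycleSketch s = new HealthMonitorLifecycleSketch();
    s.serviceStop();   // safe even though start was never called
    s.serviceStart();
    s.serviceStop();
  }
}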
   

[30/33] hadoop git commit: HDFS-8237. Move all protocol classes used by ClientProtocol to hdfs-client. Contributed by Haohui Mai.

2015-05-04 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6aa5d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
deleted file mode 100644
index f6b3c34..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
+++ /dev/null
@@ -1,358 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.Date;
-
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSUtil;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Describes a path-based cache directive.
- */
-@InterfaceStability.Evolving
-@InterfaceAudience.Public
-public class CacheDirectiveInfo {
-  /**
-   * A builder for creating new CacheDirectiveInfo instances.
-   */
-  public static class Builder {
-private Long id;
-private Path path;
-private Short replication;
-private String pool;
-private Expiration expiration;
-
-/**
- * Builds a new CacheDirectiveInfo populated with the set properties.
- * 
- * @return New CacheDirectiveInfo.
- */
-public CacheDirectiveInfo build() {
-  return new CacheDirectiveInfo(id, path, replication, pool, expiration);
-}
-
-/**
- * Creates an empty builder.
- */
-public Builder() {
-}
-
-/**
- * Creates a builder with all elements set to the same values as the
- * given CacheDirectiveInfo.
- */
-public Builder(CacheDirectiveInfo directive) {
-  this.id = directive.getId();
-  this.path = directive.getPath();
-  this.replication = directive.getReplication();
-  this.pool = directive.getPool();
-  this.expiration = directive.getExpiration();
-}
-
-/**
- * Sets the id used in this request.
- * 
- * @param id The id used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setId(Long id) {
-  this.id = id;
-  return this;
-}
-
-/**
- * Sets the path used in this request.
- * 
- * @param path The path used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setPath(Path path) {
-  this.path = path;
-  return this;
-}
-
-/**
- * Sets the replication used in this request.
- * 
- * @param replication The replication used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setReplication(Short replication) {
-  this.replication = replication;
-  return this;
-}
-
-/**
- * Sets the pool used in this request.
- * 
- * @param pool The pool used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setPool(String pool) {
-  this.pool = pool;
-  return this;
-}
-
-/**
- * Sets when the CacheDirective should expire. A
- * {@link CacheDirectiveInfo.Expiration} can specify either an absolute or
- * relative expiration time.
- * 
- * @param expiration when this CacheDirective should expire
- * @return This builder, for call chaining
- */
-public Builder setExpiration(Expiration expiration) {
-  this.expiration = expiration;
-  return this;
-}
-  }
-
-  /**
-   * Denotes a relative or absolute expiration time for a CacheDirective. Use
-   * factory methods {@link CacheDirectiveInfo.Expiration#newAbsolute(Date)} 
and
-   * {@link CacheDirectiveInfo.Expiration#newRelative(long)} to create an
-   * Expiration.
-   * <p>
-   * In either case, the server-side clock is used to determine when a
-   * CacheDirective expires.
-   */
-  public static class 
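
The classes above are moved verbatim to hadoop-hdfs-client, so caller code is
unaffected by the relocation. For reference, typical use of the Builder shown
in the deleted copy (path and pool name are illustrative; requires the Hadoop
client libraries on the classpath):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class CacheDirectiveBuilderSketch {
  public static void main(String[] args) {
    CacheDirectiveInfo directive = new CacheDirectiveInfo.Builder()
        .setPath(new Path("/hot/dataset"))   // HDFS path to cache
        .setReplication((short) 3)           // cached replica count
        .setPool("analytics")                // target cache pool
        .build();
    System.out.println(directive);
  }
}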

[09/33] hadoop git commit: HDFS-8229. LAZY_PERSIST file gets deleted after NameNode restart. (Contributed by Surendra Singh Lilhore)

2015-05-04 Thread jitendra
HDFS-8229. LAZY_PERSIST file gets deleted after NameNode restart. (Contributed 
by Surendra Singh Lilhore)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6f541edc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6f541edc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6f541edc

Branch: refs/heads/HDFS-7240
Commit: 6f541edce0ed64bf316276715c4bc07794ff20ac
Parents: 7d46a80
Author: Arpit Agarwal a...@apache.org
Authored: Fri May 1 16:30:51 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Fri May 1 16:42:19 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  9 +-
 .../fsdataset/impl/LazyPersistTestCase.java |  1 +
 .../fsdataset/impl/TestLazyPersistFiles.java| 31 
 4 files changed, 43 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f541edc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 179fe7e..1882df5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -601,6 +601,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8276. LazyPersistFileScrubber should be disabled if scrubber interval
 configured zero. (Surendra Singh Lilhore via Arpit Agarwal)
 
+HDFS-8229. LAZY_PERSIST file gets deleted after NameNode restart.
+(Surendra Singh Lilhore via Arpit Agarwal) 
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f541edc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index d5ff80e..809d594 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -4767,7 +4767,14 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 public void run() {
   while (fsRunning  shouldRun) {
 try {
-  clearCorruptLazyPersistFiles();
+  if (!isInSafeMode()) {
+clearCorruptLazyPersistFiles();
+  } else {
+if (FSNamesystem.LOG.isDebugEnabled()) {
+  FSNamesystem.LOG
+  .debug("Namenode is in safemode, skipping scrubbing of corrupted lazy-persist files.");
+}
+  }
   Thread.sleep(scrubIntervalSec * 1000);
 } catch (InterruptedException e) {
   FSNamesystem.LOG.info(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f541edc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
index 93cd23a..7e1aa81 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/LazyPersistTestCase.java
@@ -258,6 +258,7 @@ public abstract class LazyPersistTestCase {
 LAZY_WRITER_INTERVAL_SEC);
 conf.setLong(DFS_DATANODE_RAM_DISK_LOW_WATERMARK_BYTES,
 evictionLowWatermarkReplicas * BLOCK_SIZE);
+conf.setInt(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY, 1);
 
 if (useSCR) {
   conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6f541edc/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/TestLazyPersistFiles.java
index 60cc8fe..950e9dc 100644
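
The guard added to the scrubber thread follows the usual rule for destructive
background work: while the namenode is in safe mode, block reports may still
be incomplete, so a lazy-persist file must not be judged corrupt and deleted
yet. A standalone sketch of the pattern, with simplified stand-ins for
FSNamesystem:

public class ScrubberGuardSketch {
  private volatile boolean inSafeMode = true;

  void clearCorruptLazyPersistFiles() {
    System.out.println("scrubbing corrupt lazy-persist files");
  }

  void scrubOnce() {
    if (!inSafeMode) {
      clearCorruptLazyPersistFiles();
    } else {
      // Replicas may simply not have been reported yet; deleting now could
      // destroy files that are intact, which is the bug this change fixes.
      System.out.println("in safe mode, skipping scrub");
    }
  }

  public static void main(String[] args) {
    ScrubberGuardSketch s = new ScrubberGuardSketch();
    s.scrubOnce();          // skipped while in safe mode
    s.inSafeMode = false;
    s.scrubOnce();          // runs once safe mode is left
  }
}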

[32/33] hadoop git commit: YARN-3375. NodeHealthScriptRunner.shouldRun() check is performing 3 times for starting NodeHealthScriptRunner (Devaraj K via wangda)

2015-05-04 Thread jitendra
YARN-3375. NodeHealthScriptRunner.shouldRun() check is performing 3 times for 
starting NodeHealthScriptRunner (Devaraj K via wangda)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/71f4de22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/71f4de22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/71f4de22

Branch: refs/heads/HDFS-7240
Commit: 71f4de220c74bf2c90630bd0442979d92380d304
Parents: 0d6aa5d
Author: Wangda Tan wan...@apache.org
Authored: Mon May 4 15:49:19 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Mon May 4 15:49:19 2015 -0700

--
 .../java/org/apache/hadoop/util/NodeHealthScriptRunner.java  | 8 
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/server/nodemanager/NodeHealthCheckerService.java| 4 +---
 .../apache/hadoop/yarn/server/nodemanager/NodeManager.java   | 4 +++-
 4 files changed, 7 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/71f4de22/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
index 568ad5b..fc392c4 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
@@ -214,11 +214,6 @@ public class NodeHealthScriptRunner extends 
AbstractService {
*/
   @Override
   protected void serviceStart() throws Exception {
-// if health script path is not configured don't start the thread.
-if (!shouldRun(nodeHealthScript)) {
-  LOG.info("Not starting node health monitor");
-  return;
-}
 nodeHealthScriptScheduler = new Timer("NodeHealthMonitor-Timer", true);
 // Start the timer task immediately and
 // then periodically at interval time.
@@ -232,9 +227,6 @@ public class NodeHealthScriptRunner extends AbstractService 
{
*/
   @Override
   protected void serviceStop() {
-if (!shouldRun(nodeHealthScript)) {
-  return;
-}
 if (nodeHealthScriptScheduler != null) {
   nodeHealthScriptScheduler.cancel();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71f4de22/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 08762e3..09ec41a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -299,6 +299,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3097. Logging of resource recovery on NM restart has redundancies
 (Eric Payne via jlowe)
 
+YARN-3375. NodeHealthScriptRunner.shouldRun() check is performing 3 times 
for 
+starting NodeHealthScriptRunner. (Devaraj K via wangda)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/71f4de22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
index 02b318a..c1a159a 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeHealthCheckerService.java
@@ -21,7 +21,6 @@ package org.apache.hadoop.yarn.server.nodemanager;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.service.CompositeService;
 import org.apache.hadoop.util.NodeHealthScriptRunner;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
 
 /**
  * The class which provides functionality of checking the health of the node 
and
@@ -44,8 +43,7 @@ public class NodeHealthCheckerService extends 
CompositeService {
 
   @Override
   protected void serviceInit(Configuration conf) throws Exception {
-if (NodeHealthScriptRunner.shouldRun(
-

[28/33] hadoop git commit: HDFS-7397. Add more detail to the documentation for the conf key dfs.client.read.shortcircuit.streams.cache.size (Brahma Reddy Battula via Colin P. McCabe)

2015-05-04 Thread jitendra
HDFS-7397. Add more detail to the documentation for the conf key 
dfs.client.read.shortcircuit.streams.cache.size (Brahma Reddy Battula via 
Colin P. McCabe)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fe79e1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fe79e1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fe79e1d

Branch: refs/heads/HDFS-7240
Commit: 3fe79e1db84391cb17dbed6b579fe9c803b3d1c2
Parents: 7dc3c120
Author: Colin Patrick Mccabe cmcc...@cloudera.com
Authored: Mon May 4 12:50:29 2015 -0700
Committer: Colin Patrick Mccabe cmcc...@cloudera.com
Committed: Mon May 4 12:50:29 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 4 
 .../hadoop-hdfs/src/main/resources/hdfs-default.xml  | 2 +-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fe79e1d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a365b86..21d73ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -500,6 +500,10 @@ Release 2.8.0 - UNRELEASED
 HDFS-8249. Separate HdfsConstants into the client and the server side
 class. (wheat9)
 
+HDFS-7397. Add more detail to the documentation for the conf key
+dfs.client.read.shortcircuit.streams.cache.size (Brahma Reddy Battula via
+Colin P. McCabe)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fe79e1d/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
index f03c0fb..e3e4323 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml
@@ -2063,7 +2063,7 @@
   <value>256</value>
   <description>
 The DFSClient maintains a cache of recently opened file descriptors.
-This parameter controls the size of that cache.
+This parameter controls the maximum number of file descriptors in the 
cache.
 Setting this higher will use more file descriptors,
 but potentially provide better performance on workloads
 involving lots of seeks.
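
A hypothetical snippet showing how a client-side setting like this one is read
through the standard Configuration API; the key string and default value are
taken from the stanza above, and hadoop-common must be on the classpath:

import org.apache.hadoop.conf.Configuration;

public class ShortCircuitCacheConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Maximum number of cached short-circuit file descriptors.
    int cacheSize = conf.getInt(
        "dfs.client.read.shortcircuit.streams.cache.size", 256);
    System.out.println("fd cache size = " + cacheSize);
  }
}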



[02/33] hadoop git commit: HADOOP-11889. Make checkstyle runnable from root project (Gera Shegalov via jeagles)

2015-05-04 Thread jitendra
HADOOP-11889. Make checkstyle runnable from root project (Gera Shegalov via 
jeagles)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2d7363b2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2d7363b2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2d7363b2

Branch: refs/heads/HDFS-7240
Commit: 2d7363b27360e36fdd62546c0f9d0b1d78133f29
Parents: 279958b
Author: Jonathan Eagles jeag...@gmail.com
Authored: Fri May 1 13:11:50 2015 -0500
Committer: Jonathan Eagles jeag...@gmail.com
Committed: Fri May 1 13:11:50 2015 -0500

--
 hadoop-build-tools/pom.xml  |  28 +++
 .../checkstyle/checkstyle-noframes-sorted.xsl   | 194 +++
 .../main/resources/checkstyle/checkstyle.xml| 186 ++
 .../hadoop-common/dev-support/checkstyle.xml| 185 --
 .../src/test/checkstyle-noframes-sorted.xsl | 194 ---
 .../dev-support/checkstyle-noframes-sorted.xsl  | 178 -
 .../hadoop-hdfs/dev-support/checkstyle.xml  | 169 
 hadoop-project-dist/pom.xml |  11 --
 hadoop-project/pom.xml  |   6 -
 .../hadoop-azure/src/config/checkstyle.xml  |   1 -
 hadoop-tools/hadoop-distcp/pom.xml  |   7 -
 pom.xml |  32 +++
 12 files changed, 440 insertions(+), 751 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7363b2/hadoop-build-tools/pom.xml
--
diff --git a/hadoop-build-tools/pom.xml b/hadoop-build-tools/pom.xml
new file mode 100644
index 000..1931072
--- /dev/null
+++ b/hadoop-build-tools/pom.xml
@@ -0,0 +1,28 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+<parent>
+  <artifactId>hadoop-main</artifactId>
+  <groupId>org.apache.hadoop</groupId>
+  <version>3.0.0-SNAPSHOT</version>
+</parent>
+<modelVersion>4.0.0</modelVersion>
+
+<artifactId>hadoop-build-tools</artifactId>
+  <description>Apache Hadoop Build Tools Project</description>
+  <name>Apache Hadoop Build Tools</name>
+</project>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2d7363b2/hadoop-build-tools/src/main/resources/checkstyle/checkstyle-noframes-sorted.xsl
--
diff --git 
a/hadoop-build-tools/src/main/resources/checkstyle/checkstyle-noframes-sorted.xsl
 
b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle-noframes-sorted.xsl
new file mode 100644
index 000..b7826e3
--- /dev/null
+++ 
b/hadoop-build-tools/src/main/resources/checkstyle/checkstyle-noframes-sorted.xsl
@@ -0,0 +1,194 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html" indent="yes"/>
+<xsl:decimal-format decimal-separator="." grouping-separator="," />
+
+<xsl:key name="files" match="file" use="@name" />
+
+<!-- Checkstyle XML Style Sheet by Stephane Bailliez sbaill...@apache.org -->
+<!-- Part of the Checkstyle distribution found at http://checkstyle.sourceforge.net -->
+<!-- Usage (generates checkstyle_report.html): -->
+<!--checkstyle 

[12/33] hadoop git commit: YARN-2893. AMLaucher: sporadic job failures due to EOFException in readTokenStorageStream. (Zhihai Xu via gera)

2015-05-04 Thread jitendra
YARN-2893. AMLaucher: sporadic job failures due to EOFException in 
readTokenStorageStream. (Zhihai Xu via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f8204e24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f8204e24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f8204e24

Branch: refs/heads/HDFS-7240
Commit: f8204e241d9271497defd4d42646fb89c61cefe3
Parents: 6f541ed
Author: Gera Shegalov g...@apache.org
Authored: Fri May 1 14:49:09 2015 -0700
Committer: Gera Shegalov g...@apache.org
Committed: Fri May 1 18:18:55 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |  3 +
 .../server/resourcemanager/RMAppManager.java| 36 +--
 .../resourcemanager/amlauncher/AMLauncher.java  | 11 +++-
 .../server/resourcemanager/TestAppManager.java  | 60 ++
 .../TestApplicationMasterLauncher.java  | 64 
 5 files changed, 153 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8204e24/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 6f38201..c110f88 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -287,6 +287,9 @@ Release 2.8.0 - UNRELEASED
 YARN-3564. Fix 
TestContainerAllocation.testAMContainerAllocationWhenDNSUnavailable 
 fails randomly. (Jian He via wangda)
 
+YARN-2893. AMLaucher: sporadic job failures due to EOFException in
+readTokenStorageStream. (Zhihai Xu via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f8204e24/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
index e511ff0..ca21f11 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java
@@ -281,29 +281,29 @@ public class RMAppManager implements 
EventHandler<RMAppManagerEvent>,
 RMAppImpl application =
 createAndPopulateNewRMApp(submissionContext, submitTime, user, false);
 ApplicationId appId = submissionContext.getApplicationId();
-
-if (UserGroupInformation.isSecurityEnabled()) {
-  try {
+Credentials credentials = null;
+try {
+  credentials = parseCredentials(submissionContext);
+  if (UserGroupInformation.isSecurityEnabled()) {
 this.rmContext.getDelegationTokenRenewer().addApplicationAsync(appId,
-parseCredentials(submissionContext),
-submissionContext.getCancelTokensWhenComplete(),
+credentials, submissionContext.getCancelTokensWhenComplete(),
 application.getUser());
-  } catch (Exception e) {
-LOG.warn("Unable to parse credentials.", e);
-// Sending APP_REJECTED is fine, since we assume that the
-// RMApp is in NEW state and thus we haven't yet informed the
-// scheduler about the existence of the application
-assert application.getState() == RMAppState.NEW;
+  } else {
+// Dispatcher is not yet started at this time, so these START events
+// enqueued should be guaranteed to be first processed when dispatcher
+// gets started.
 this.rmContext.getDispatcher().getEventHandler()
-  .handle(new RMAppRejectedEvent(applicationId, e.getMessage()));
-throw RPCUtil.getRemoteException(e);
+.handle(new RMAppEvent(applicationId, RMAppEventType.START));
   }
-} else {
-  // Dispatcher is not yet started at this time, so these START events
-  // enqueued should be guaranteed to be first processed when dispatcher
-  // gets started.
+} catch (Exception e) {
+  LOG.warn("Unable to parse credentials.", e);
+  // Sending APP_REJECTED is fine, since we assume that the
+  // RMApp is in NEW state and thus we haven't yet informed the
+  // scheduler about the existence of the application
+  assert application.getState() == 
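
The restructuring parses credentials unconditionally, so a corrupt token
stream (the EOFException of the title) is rejected through the same path
whether or not security is enabled, instead of going unnoticed when it is off.
A standalone sketch of that control-flow change, with simplified stand-ins for
the RM types:

import java.io.EOFException;

public class CredentialParseSketch {
  static String parseCredentials(byte[] tokens) throws EOFException {
    if (tokens == null || tokens.length == 0) {
      throw new EOFException("truncated token storage stream");
    }
    return "credentials";
  }

  static void submitApplication(byte[] tokens, boolean securityEnabled) {
    try {
      String credentials = parseCredentials(tokens);  // now parsed in both modes
      System.out.println(securityEnabled
          ? "renewing " + credentials : "dispatching START event");
    } catch (EOFException e) {
      // Single rejection path, matching the merged catch block in the diff.
      System.out.println("rejecting app: " + e.getMessage());
    }
  }

  public static void main(String[] args) {
    submitApplication(new byte[] {1}, true);
    submitApplication(new byte[0], false);  // previously unparsed; now rejected early
  }
}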

[21/33] hadoop git commit: MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs unnecessary null strings. Contributed by Akira AJISAKA.

2015-05-04 Thread jitendra
MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs unnecessary 
null strings. Contributed by Akira AJISAKA.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ba18362
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ba18362
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ba18362

Branch: refs/heads/HDFS-7240
Commit: 3ba18362f2a4b83635b89aa0adc5ebaf27d9ca83
Parents: a319771
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 15:02:21 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 15:02:21 2015 +0900

--
 hadoop-mapreduce-project/CHANGES.txt   |  3 +++
 .../hadoop/mapreduce/util/CountersStrings.java | 17 ++---
 .../org/apache/hadoop/mapred/TestCounters.java |  3 +++
 3 files changed, 8 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ba18362/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 2d87444..062042c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -359,6 +359,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6345. Documentation fix for when CRLA is enabled for MRAppMaster
 logs. (Rohit Agarwal via gera)
 
+MAPREDUCE-5905. CountersStrings.toEscapedCompactStrings outputs
+unnecessary null strings. (Akira AJISAKA via ozawa)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ba18362/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
index ce799f5..ac16c12 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/util/CountersStrings.java
@@ -151,25 +151,12 @@ public class CountersStrings {
  public static <C extends Counter, G extends CounterGroupBase<C>,
 T extends AbstractCounters<C, G>>
  String toEscapedCompactString(T counters) {
-String[] groupsArray;
-int length = 0;
+StringBuilder builder = new StringBuilder();
 synchronized(counters) {
-  groupsArray = new String[counters.countCounters()];
-  int i = 0;
-  // First up, obtain the escaped string for each group so that we can
-  // determine the buffer length apriori.
   for (G group : counters) {
-String escapedString = toEscapedCompactString(group);
-groupsArray[i++] = escapedString;
-length += escapedString.length();
+builder.append(toEscapedCompactString(group));
   }
 }
-
-// Now construct the buffer
-StringBuilder builder = new StringBuilder(length);
-for (String group : groupsArray) {
-  builder.append(group);
-}
 return builder.toString();
   }
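
The removed code sized groupsArray with counters.countCounters(), which evidently counts individual counters rather than groups, so unused slots stayed null and the second loop appended them as the literal text "null". A standalone demo of that StringBuilder behavior (illustrative values, not Hadoop code):

public class NullAppendDemo {
  public static void main(String[] args) {
    // Array sized too large, as countCounters() did for the group array.
    String[] groupsArray = new String[3];
    groupsArray[0] = "{(group)(Group)[(c)(Counter)(1)]}";
    StringBuilder builder = new StringBuilder();
    for (String group : groupsArray) {
      builder.append(group); // a null String appends the four chars "null"
    }
    System.out.println(builder); // {(group)(Group)[(c)(Counter)(1)]}nullnull
  }
}

The fix sidesteps the problem by appending each group's escaped string as it is produced, trading the precomputed buffer length for correctness.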
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ba18362/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
index 46e7221..5e2763e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapred/TestCounters.java
@@ -18,6 +18,7 @@
 package org.apache.hadoop.mapred;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 
 import java.io.IOException;
@@ -84,6 +85,8 @@ public class TestCounters {
*/
   private void testCounter(Counters counter) throws ParseException {
 String compactEscapedString = counter.makeEscapedCompactString();
+
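The test hunk is cut off here; given the new assertFalse import, the added check presumably asserts that the escaped compact string no longer contains the literal text "null", along these lines (a hypothetical reconstruction, not the committed assertion):

assertFalse("Counter string contains \"null\"",
    compactEscapedString.contains("null"));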

[17/33] hadoop git commit: HDFS-8249. Separate HdfsConstants into the client and the server side class. Contributed by Haohui Mai.

2015-05-04 Thread jitendra
HDFS-8249. Separate HdfsConstants into the client and the server side class. 
Contributed by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6ae2a0d0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6ae2a0d0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6ae2a0d0

Branch: refs/heads/HDFS-7240
Commit: 6ae2a0d048e133b43249c248a75a4d77d9abb80d
Parents: 57d9a97
Author: Haohui Mai whe...@apache.org
Authored: Fri May 1 15:27:28 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Sat May 2 10:03:23 2015 -0700

--
 .../org/apache/hadoop/hdfs/HAUtilClient.java|   2 +-
 .../org/apache/hadoop/hdfs/protocol/Block.java  |   4 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java | 109 
 .../hdfs/protocol/HdfsConstantsClient.java  |  45 -
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |   6 +-
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |   4 +-
 .../hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java|   4 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   3 +
 .../bkjournal/BookKeeperJournalManager.java |  14 +-
 .../bkjournal/EditLogLedgerMetadata.java|   8 +-
 .../bkjournal/TestBookKeeperEditLogStreams.java |   6 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   8 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|   8 +-
 .../org/apache/hadoop/hdfs/DataStreamer.java|   5 +-
 .../org/apache/hadoop/hdfs/NameNodeProxies.java |   3 +-
 .../hadoop/hdfs/client/impl/LeaseRenewer.java   |   6 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|   4 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java | 175 ---
 .../protocol/SnapshottableDirectoryStatus.java  |   2 +-
 .../protocolPB/ClientNamenodeProtocolPB.java|   2 +-
 ...tNamenodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +-
 .../hdfs/qjournal/client/IPCLoggerChannel.java  |   4 +-
 .../hdfs/qjournal/protocol/RequestInfo.java |   4 +-
 .../QJournalProtocolServerSideTranslatorPB.java |   4 +-
 .../hadoop/hdfs/qjournal/server/Journal.java|  22 +--
 .../hadoop/hdfs/server/balancer/Dispatcher.java |   5 +-
 .../server/blockmanagement/BlockIdManager.java  |  10 +-
 .../BlockPlacementPolicyDefault.java|   4 +-
 .../BlockStoragePolicySuite.java|  19 +-
 .../hdfs/server/common/HdfsServerConstants.java | 109 +---
 .../hadoop/hdfs/server/common/StorageInfo.java  |   5 +-
 .../server/datanode/BlockMetadataHeader.java|   4 +-
 .../server/datanode/BlockPoolSliceStorage.java  |  20 +--
 .../hdfs/server/datanode/BlockReceiver.java |   4 +-
 .../hdfs/server/datanode/BlockSender.java   |   8 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   2 +-
 .../hdfs/server/datanode/DataStorage.java   |  35 ++--
 .../hdfs/server/datanode/DataXceiver.java   |  16 +-
 .../hdfs/server/datanode/DirectoryScanner.java  |   4 +-
 .../datanode/fsdataset/impl/BlockPoolSlice.java |   4 +-
 .../datanode/fsdataset/impl/FsDatasetImpl.java  |   8 +-
 .../datanode/fsdataset/impl/FsDatasetUtil.java  |   4 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../hadoop/hdfs/server/namenode/BackupNode.java |   8 +-
 .../namenode/EditLogBackupInputStream.java  |   6 +-
 .../server/namenode/EditLogFileInputStream.java |  22 +--
 .../server/namenode/EditLogInputStream.java |   4 +-
 .../hdfs/server/namenode/EditsDoubleBuffer.java |   6 +-
 .../server/namenode/FSDirStatAndListingOp.java  |  16 +-
 .../hdfs/server/namenode/FSDirectory.java   |   6 +-
 .../hadoop/hdfs/server/namenode/FSEditLog.java  |   8 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |  24 +--
 .../hdfs/server/namenode/FSEditLogOp.java   |  28 +--
 .../hadoop/hdfs/server/namenode/FSImage.java|  18 +-
 .../hdfs/server/namenode/FSImageFormat.java |   7 +-
 .../server/namenode/FSImageFormatProtobuf.java  |   8 +-
 ...FSImagePreTransactionalStorageInspector.java |   4 +-
 .../namenode/FSImageStorageInspector.java   |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  19 +-
 .../server/namenode/FileJournalManager.java |  22 +--
 .../hadoop/hdfs/server/namenode/INode.java  |  13 +-
 .../hdfs/server/namenode/INodeDirectory.java|   2 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   2 +-
 .../hadoop/hdfs/server/namenode/INodeId.java|   4 +-
 .../hadoop/hdfs/server/namenode/INodeMap.java   |   6 +-
 .../hdfs/server/namenode/INodesInPath.java  |   3 +-
 .../hdfs/server/namenode/LeaseManager.java  |   5 +-
 .../hadoop/hdfs/server/namenode/NNStorage.java  |  11 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |   2 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  10 +-
 .../namenode/RedundantEditLogInputStream.java   |  12 +-
 .../hdfs/server/namenode/TransferFsImage.java   |   8 +-
 

[1/2] hadoop git commit: HDFS-8237. Move all protocol classes used by ClientProtocol to hdfs-client. Contributed by Haohui Mai.

2015-05-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk bf70c5ae2 -> 0d6aa5d60


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0d6aa5d6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
deleted file mode 100644
index f6b3c34..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
+++ /dev/null
@@ -1,358 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.Date;
-
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSUtil;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Describes a path-based cache directive.
- */
-@InterfaceStability.Evolving
-@InterfaceAudience.Public
-public class CacheDirectiveInfo {
-  /**
-   * A builder for creating new CacheDirectiveInfo instances.
-   */
-  public static class Builder {
-private Long id;
-private Path path;
-private Short replication;
-private String pool;
-private Expiration expiration;
-
-/**
- * Builds a new CacheDirectiveInfo populated with the set properties.
- * 
- * @return New CacheDirectiveInfo.
- */
-public CacheDirectiveInfo build() {
-  return new CacheDirectiveInfo(id, path, replication, pool, expiration);
-}
-
-/**
- * Creates an empty builder.
- */
-public Builder() {
-}
-
-/**
- * Creates a builder with all elements set to the same values as the
- * given CacheDirectiveInfo.
- */
-public Builder(CacheDirectiveInfo directive) {
-  this.id = directive.getId();
-  this.path = directive.getPath();
-  this.replication = directive.getReplication();
-  this.pool = directive.getPool();
-  this.expiration = directive.getExpiration();
-}
-
-/**
- * Sets the id used in this request.
- * 
- * @param id The id used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setId(Long id) {
-  this.id = id;
-  return this;
-}
-
-/**
- * Sets the path used in this request.
- * 
- * @param path The path used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setPath(Path path) {
-  this.path = path;
-  return this;
-}
-
-/**
- * Sets the replication used in this request.
- * 
- * @param replication The replication used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setReplication(Short replication) {
-  this.replication = replication;
-  return this;
-}
-
-/**
- * Sets the pool used in this request.
- * 
- * @param pool The pool used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setPool(String pool) {
-  this.pool = pool;
-  return this;
-}
-
-/**
- * Sets when the CacheDirective should expire. A
- * {@link CacheDirectiveInfo.Expiration} can specify either an absolute or
- * relative expiration time.
- * 
- * @param expiration when this CacheDirective should expire
- * @return This builder, for call chaining
- */
-public Builder setExpiration(Expiration expiration) {
-  this.expiration = expiration;
-  return this;
-}
-  }
-
-  /**
-   * Denotes a relative or absolute expiration time for a CacheDirective. Use
-   * factory methods {@link CacheDirectiveInfo.Expiration#newAbsolute(Date)} 
and
-   * {@link CacheDirectiveInfo.Expiration#newRelative(long)} to create an
-   * Expiration.
-   * <p>
-   * In either case, the server-side clock is used 

hadoop git commit: YARN-2980. Move health check script related functionality to hadoop-common (Varun Saxena via aw)

2015-05-04 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 db0bd6dca -> 02e650248


YARN-2980. Move health check script related functionality to hadoop-common 
(Varun Saxena via aw)

(cherry picked from commit d4ac6822e1c5dfac504ced48f10ab57a55b49e93)

Conflicts:

hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/NodeManager.java


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02e65024
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02e65024
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02e65024

Branch: refs/heads/branch-2
Commit: 02e650248d3ebb7c779a259f5419f4ad41048f75
Parents: db0bd6d
Author: Allen Wittenauer a...@apache.org
Authored: Tue Feb 24 11:25:26 2015 -0800
Committer: Wangda Tan wan...@apache.org
Committed: Mon May 4 16:00:20 2015 -0700

--
 .../hadoop/util/NodeHealthScriptRunner.java | 345 ++
 .../hadoop/util/TestNodeHealthScriptRunner.java | 136 +++
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../nodemanager/NodeHealthCheckerService.java   |  12 +-
 .../nodemanager/NodeHealthScriptRunner.java | 356 ---
 .../yarn/server/nodemanager/NodeManager.java|  26 +-
 .../yarn/server/nodemanager/TestEventFlow.java  |   7 +-
 .../nodemanager/TestNodeHealthService.java  |  86 ++---
 .../BaseContainerManagerTest.java   |   7 +-
 .../webapp/TestContainerLogsPage.java   |  13 +-
 .../nodemanager/webapp/TestNMWebServer.java |  13 +-
 .../nodemanager/webapp/TestNMWebServices.java   |   7 +-
 .../webapp/TestNMWebServicesApps.java   |   6 +-
 .../webapp/TestNMWebServicesContainers.java |   5 +-
 14 files changed, 586 insertions(+), 436 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02e65024/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
new file mode 100644
index 000..568ad5b
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NodeHealthScriptRunner.java
@@ -0,0 +1,345 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Timer;
+import java.util.TimerTask;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.service.AbstractService;
+import org.apache.hadoop.util.Shell.ExitCodeException;
+import org.apache.hadoop.util.Shell.ShellCommandExecutor;
+import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * 
+ * The class which provides functionality of checking the health of the node
+ * using the configured node health script and reporting back to the service
+ * for which the health checker has been asked to report.
+ */
+public class NodeHealthScriptRunner extends AbstractService {
+
+  private static Log LOG = LogFactory.getLog(NodeHealthScriptRunner.class);
+
+  /** Absolute path to the health script. */
+  private String nodeHealthScript;
+  /** Delay after which node health script to be executed */
+  private long intervalTime;
+  /** Time after which the script should be timedout */
+  private long scriptTimeout;
+  /** Timer used to schedule node health monitoring script execution */
+  private Timer nodeHealthScriptScheduler;
+
+  /** ShellCommandExecutor used to execute monitoring script */
+  ShellCommandExecutor shexec = null;
+
+  /** Pattern used for searching in the output of the node 
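
A minimal standalone sketch of the Timer-based scheduling pattern the runner above uses (illustrative names and output; the real class drives a ShellCommandExecutor with a timeout and reports through a callback):

import java.util.Timer;
import java.util.TimerTask;

public class HealthScriptSketch {
  public static void main(String[] args) throws InterruptedException {
    long intervalMs = 10_000L; // delay between health script executions
    Timer scheduler = new Timer("node-health-script", true /* daemon */);
    scheduler.scheduleAtFixedRate(new TimerTask() {
      @Override
      public void run() {
        // Stand-in for executing the configured script and scanning
        // its output for an ERROR pattern.
        System.out.println("running node health script...");
      }
    }, 0L, intervalMs);
    Thread.sleep(25_000L); // keep the daemon timer alive for the demo
  }
}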

hadoop git commit: YARN-3551. Consolidate data model change according to the backend implementation (Zhijie Shen via sjlee)

2015-05-04 Thread sjlee
Repository: hadoop
Updated Branches:
  refs/heads/YARN-2928 0b1e184cc -> 557a3950b


YARN-3551. Consolidate data model change according to the backend 
implementation (Zhijie Shen via sjlee)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/557a3950
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/557a3950
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/557a3950

Branch: refs/heads/YARN-2928
Commit: 557a3950bddc837469244835f5577899080115d8
Parents: 0b1e184
Author: Sangjin Lee sj...@apache.org
Authored: Mon May 4 16:10:20 2015 -0700
Committer: Sangjin Lee sj...@apache.org
Committed: Mon May 4 16:10:20 2015 -0700

--
 .../mapred/TimelineServicePerformanceV2.java|   2 +-
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../records/timelineservice/TimelineEntity.java |  16 +--
 .../records/timelineservice/TimelineMetric.java | 131 +--
 .../TestTimelineServiceRecords.java |  81 +---
 .../monitor/ContainersMonitorImpl.java  |   5 +-
 .../TestTimelineServiceClientIntegration.java   |   6 +
 7 files changed, 146 insertions(+), 98 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/557a3950/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
index de46617..1c2e28d 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TimelineServicePerformanceV2.java
@@ -261,7 +261,7 @@ public class TimelineServicePerformanceV2 extends 
Configured implements Tool {
   // add a metric
   TimelineMetric metric = new TimelineMetric();
   metric.setId("foo_metric");
-  metric.setSingleData(123456789L);
+  metric.addValue(System.currentTimeMillis(), 123456789L);
   entity.addMetric(metric);
   // add a config
   entity.addConfig("foo", "bar");
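
The setSingleData-to-addValue change above reflects the metric becoming a timestamped series rather than a single value. A rough standalone stand-in for the new shape (not the real TimelineMetric API):

import java.util.Map;
import java.util.TreeMap;

public class MetricSketch {
  // Time-ordered (timestamp -> value) pairs, appended by callers.
  private final Map<Long, Number> values = new TreeMap<>();

  public void addValue(long timestamp, Number value) {
    values.put(timestamp, value);
  }

  public static void main(String[] args) {
    MetricSketch metric = new MetricSketch();
    metric.addValue(System.currentTimeMillis(), 123456789L);
    System.out.println(metric.values);
  }
}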

http://git-wip-us.apache.org/repos/asf/hadoop/blob/557a3950/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3957b24..8cafca6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -58,6 +58,9 @@ Branch YARN-2928: Timeline Server Next Generation: Phase 1
 YARN-3431. Sub resources of timeline entity needs to be passed to a 
separate 
 endpoint. (Zhijie Shen via junping_du)
 
+YARN-3551. Consolidate data model change according to the backend
+implementation (Zhijie Shen via sjlee)
+
   IMPROVEMENTS
 
   OPTIMIZATIONS

http://git-wip-us.apache.org/repos/asf/hadoop/blob/557a3950/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
index 6cab753..3be7f52 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/timelineservice/TimelineEntity.java
@@ -80,7 +80,7 @@ public class TimelineEntity {
   private TimelineEntity real;
   private Identifier identifier;
   private HashMap<String, Object> info = new HashMap<>();
-  private HashMap<String, Object> configs = new HashMap<>();
+  private HashMap<String, String> configs = new HashMap<>();
   private Set<TimelineMetric> metrics = new HashSet<>();
   private Set<TimelineEvent> events = new HashSet<>();
   private HashMap<String, Set<String>> isRelatedToEntities = new HashMap<>();
@@ -213,7 +213,7 @@ public class TimelineEntity {
   // required by JAXB
   @InterfaceAudience.Private
   @XmlElement(name = "configs")
-  public HashMap<String, Object> getConfigsJAXB() {
+  public HashMap<String, String> 

[1/2] hadoop git commit: HDFS-8237. Move all protocol classes used by ClientProtocol to hdfs-client. Contributed by Haohui Mai.

2015-05-04 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 81f128f29 -> db0bd6dca


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db0bd6dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
deleted file mode 100644
index f6b3c34..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/CacheDirectiveInfo.java
+++ /dev/null
@@ -1,358 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.util.Date;
-
-import org.apache.commons.lang.builder.EqualsBuilder;
-import org.apache.commons.lang.builder.HashCodeBuilder;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSUtil;
-
-import com.google.common.base.Preconditions;
-
-/**
- * Describes a path-based cache directive.
- */
-@InterfaceStability.Evolving
-@InterfaceAudience.Public
-public class CacheDirectiveInfo {
-  /**
-   * A builder for creating new CacheDirectiveInfo instances.
-   */
-  public static class Builder {
-private Long id;
-private Path path;
-private Short replication;
-private String pool;
-private Expiration expiration;
-
-/**
- * Builds a new CacheDirectiveInfo populated with the set properties.
- * 
- * @return New CacheDirectiveInfo.
- */
-public CacheDirectiveInfo build() {
-  return new CacheDirectiveInfo(id, path, replication, pool, expiration);
-}
-
-/**
- * Creates an empty builder.
- */
-public Builder() {
-}
-
-/**
- * Creates a builder with all elements set to the same values as the
- * given CacheDirectiveInfo.
- */
-public Builder(CacheDirectiveInfo directive) {
-  this.id = directive.getId();
-  this.path = directive.getPath();
-  this.replication = directive.getReplication();
-  this.pool = directive.getPool();
-  this.expiration = directive.getExpiration();
-}
-
-/**
- * Sets the id used in this request.
- * 
- * @param id The id used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setId(Long id) {
-  this.id = id;
-  return this;
-}
-
-/**
- * Sets the path used in this request.
- * 
- * @param path The path used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setPath(Path path) {
-  this.path = path;
-  return this;
-}
-
-/**
- * Sets the replication used in this request.
- * 
- * @param replication The replication used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setReplication(Short replication) {
-  this.replication = replication;
-  return this;
-}
-
-/**
- * Sets the pool used in this request.
- * 
- * @param pool The pool used in this request.
- * @return This builder, for call chaining.
- */
-public Builder setPool(String pool) {
-  this.pool = pool;
-  return this;
-}
-
-/**
- * Sets when the CacheDirective should expire. A
- * {@link CacheDirectiveInfo.Expiration} can specify either an absolute or
- * relative expiration time.
- * 
- * @param expiration when this CacheDirective should expire
- * @return This builder, for call chaining
- */
-public Builder setExpiration(Expiration expiration) {
-  this.expiration = expiration;
-  return this;
-}
-  }
-
-  /**
-   * Denotes a relative or absolute expiration time for a CacheDirective. Use
-   * factory methods {@link CacheDirectiveInfo.Expiration#newAbsolute(Date)} 
and
-   * {@link CacheDirectiveInfo.Expiration#newRelative(long)} to create an
-   * Expiration.
-   * <p>
-   * In either case, the server-side clock is 

svn commit: r1677710 - in /hadoop/common/site/main: author/ author/src/documentation/ author/src/documentation/content/xdocs/ publish/

2015-05-04 Thread omalley
Author: omalley
Date: Mon May  4 23:18:06 2015
New Revision: 1677710

URL: http://svn.apache.org/r1677710
Log:
HADOOP-11896. Redesign releases page to provide better information. (omalley
reviewed by cnauroth)

Modified:
hadoop/common/site/main/author/forrest.properties
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
hadoop/common/site/main/author/src/documentation/skinconf.xml
hadoop/common/site/main/publish/bylaws.html
hadoop/common/site/main/publish/bylaws.pdf
hadoop/common/site/main/publish/index.html
hadoop/common/site/main/publish/index.pdf
hadoop/common/site/main/publish/issue_tracking.html
hadoop/common/site/main/publish/issue_tracking.pdf
hadoop/common/site/main/publish/linkmap.pdf
hadoop/common/site/main/publish/mailing_lists.html
hadoop/common/site/main/publish/mailing_lists.pdf
hadoop/common/site/main/publish/privacy_policy.html
hadoop/common/site/main/publish/privacy_policy.pdf
hadoop/common/site/main/publish/releases.html
hadoop/common/site/main/publish/releases.pdf
hadoop/common/site/main/publish/version_control.html
hadoop/common/site/main/publish/version_control.pdf
hadoop/common/site/main/publish/who.html
hadoop/common/site/main/publish/who.pdf

Modified: hadoop/common/site/main/author/forrest.properties
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/forrest.properties?rev=1677710&r1=1677709&r2=1677710&view=diff
==
--- hadoop/common/site/main/author/forrest.properties (original)
+++ hadoop/common/site/main/author/forrest.properties Mon May  4 23:18:06 2015
@@ -66,16 +66,10 @@ project.skin=hadoop-pelt
 # This set of properties determine if validation is performed
 # Values are inherited unless overridden.
 # e.g. if forrest.validate=false then all others are false unless set to true.
-#forrest.validate=true
-#forrest.validate.xdocs=${forrest.validate}
-#forrest.validate.skinconf=${forrest.validate}
-# Workaround for http://issues.apache.org/jira/browse/FOR-984
-# Remove when forrest-0.9 is available
-forrest.validate.sitemap=false
-forrest.validate.stylesheets=false
-forrest.validate.skins.stylesheets=false
-# End of forrest-0.8 + JDK6 workaround
-#forrest.validate.skins=${forrest.validate}
+forrest.validate=true
+forrest.validate.xdocs=${forrest.validate}
+forrest.validate.skinconf=${forrest.validate}
+forrest.validate.skins=${forrest.validate}
 
 # *.failonerror=(true|false) - stop when an XML file is invalid
 #forrest.validate.failonerror=true

Modified: 
hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml
URL: 
http://svn.apache.org/viewvc/hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml?rev=1677710&r1=1677709&r2=1677710&view=diff
==
--- hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
(original)
+++ hadoop/common/site/main/author/src/documentation/content/xdocs/releases.xml 
Mon May  4 23:18:06 2015
@@ -6,7 +6,7 @@
 <document> 
 
   <header> 
-<title>Hadoop Releases</title> 
+<title>Apache Hadoop Releases</title> 
   </header> 
 
   <body> 
@@ -14,28 +14,106 @@
 <section>
   <title>Download</title>
 
-  <ul>
-<li><strong>1.2.X - </strong> current stable version, 1.2 release</li>
- <li><strong>2.6.X - </strong> latest stable 2.x version</li>
- <li><strong>2.7.X - </strong> latest 2.x version</li>
-<li><strong>0.23.X - </strong> similar to 2.X.X but missing NN HA.</li>
-  </ul>
-
-  <p>Releases may be downloaded from Apache mirrors.</p>
-
   <p>
-  <a href="http://www.apache.org/dyn/closer.cgi/hadoop/common/">
-  <strong>Download a release now!</strong></a>
+  Hadoop is released as source code tarballs with corresponding binary 
+  tarballs for convenience. The downloads are distributed via mirror
+  sites and should be checked for tampering using GPG or SHA-256.
   </p>
 
-  <p>On the mirror, all recent releases are available.</p>
-  
-  <p>Third parties may distribute products that include Apache Hadoop and 
derived works, under the Apache License.
-  Some of these are listed on the <a 
href="http://wiki.apache.org/hadoop/Distribution">Distributions wiki 
page</a>.</p>
+  <p>
+  <table>
+   <tr>
+ <th>Version</th>
+ <th>Release Date</th>
+ <th>Tarball</th>
+ <th>GPG</th>
+ <th>SHA-256</th>
+   </tr>
+   <tr>
+ <td><a href="#21+April+2015%3A+Release+2.7.0+available">2.7.0</a></td>
+ <td>21 Apr 2015</td>
+ <td><a 
href="http://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz">source</a></td>
+ <td><a 
href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz.asc">signature</a></td>
+ <td><a 
href="https://dist.apache.org/repos/dist/release/hadoop/common/hadoop-2.7.0/hadoop-2.7.0-src.tar.gz.mds">E7F877A3
 

[23/33] hadoop git commit: HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when native code is not loaded. Contributed by Zhijie Shen.

2015-05-04 Thread jitendra
HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when 
native code is not loaded. Contributed by Zhijie Shen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76fa606e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76fa606e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76fa606e

Branch: refs/heads/HDFS-7240
Commit: 76fa606e2d3d04407f2f6b4ea276cce0f60db4be
Parents: bb6ef29
Author: Tsuyoshi Ozawa oz...@apache.org
Authored: Mon May 4 17:05:00 2015 +0900
Committer: Tsuyoshi Ozawa oz...@apache.org
Committed: Mon May 4 17:05:00 2015 +0900

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java  | 3 ++-
 2 files changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76fa606e/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index ea3316a..bb8f900 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -616,6 +616,9 @@ Release 2.7.1 - UNRELEASED
 
 HADOOP-11891. OsSecureRandom should lazily fill its reservoir (asuresh)
 
+HADOOP-9658. SnappyCodec#checkNativeCodeLoaded may unexpectedly fail when
+native code is not loaded. (Zhijie Shen via ozawa)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76fa606e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
index 8d2fa1a..2a9c5d0 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/SnappyCodec.java
@@ -60,7 +60,8 @@ public class SnappyCodec implements Configurable, 
CompressionCodec, DirectDecomp
 * Are the native snappy libraries loaded & initialized?
*/
   public static void checkNativeCodeLoaded() {
-  if (!NativeCodeLoader.buildSupportsSnappy()) {
+  if (!NativeCodeLoader.isNativeCodeLoaded() ||
+  !NativeCodeLoader.buildSupportsSnappy()) {
 throw new RuntimeException("native snappy library not available: " +
 "this version of libhadoop was built without " +
 "snappy support.");
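
The fix short-circuits on isNativeCodeLoaded() before invoking the feature probe, since calling a native method when libhadoop never loaded fails with UnsatisfiedLinkError rather than a clean report. A self-contained illustration of the guard pattern (hypothetical names, not the NativeCodeLoader API):

public class NativeGuardSketch {
  static boolean nativeLoaded = false; // would be set after System.loadLibrary

  static boolean buildSupportsSnappy() { // stand-in for a native method
    if (!nativeLoaded) {
      throw new UnsatisfiedLinkError("libhadoop not loaded");
    }
    return true;
  }

  public static void checkNativeCodeLoaded() {
    // Probe build features only once the library itself is known loaded.
    if (!nativeLoaded || !buildSupportsSnappy()) {
      throw new RuntimeException("native snappy library not available");
    }
  }

  public static void main(String[] args) {
    try {
      checkNativeCodeLoaded();
    } catch (RuntimeException e) {
      System.out.println("expected: " + e.getMessage());
    }
  }
}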



[20/33] hadoop git commit: HDFS-8309. Skip unit test using DataNodeTestUtils#injectDataDirFailure() on Windows. (xyao)

2015-05-04 Thread jitendra
HDFS-8309. Skip unit test using DataNodeTestUtils#injectDataDirFailure() on 
Windows. (xyao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a319771d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a319771d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a319771d

Branch: refs/heads/HDFS-7240
Commit: a319771d1d9eebaf8e4165dba73383a229cb1525
Parents: e8d0ee5
Author: Xiaoyu Yao x...@apache.org
Authored: Sat May 2 22:15:24 2015 -0700
Committer: Xiaoyu Yao x...@apache.org
Committed: Sat May 2 22:15:24 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  5 -
 .../hdfs/server/datanode/TestDataNodeHotSwapVolumes.java |  5 +
 .../hdfs/server/datanode/TestDataNodeVolumeFailure.java  | 11 +++
 .../datanode/TestDataNodeVolumeFailureReporting.java |  5 ++---
 4 files changed, 18 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a319771d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 71873a4..e525800 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -605,7 +605,10 @@ Release 2.8.0 - UNRELEASED
 configured zero. (Surendra Singh Lilhore via Arpit Agarwal)
 
 HDFS-8229. LAZY_PERSIST file gets deleted after NameNode restart.
-(Surendra Singh Lilhore via Arpit Agarwal) 
+(Surendra Singh Lilhore via Arpit Agarwal)
+
+HDFS-8309. Skip unit test using DataNodeTestUtils#injectDataDirFailure() 
on Windows.
+(xyao)
 
 Release 2.7.1 - UNRELEASED
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a319771d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
index 668084b..315529c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeHotSwapVolumes.java
@@ -78,6 +78,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
+import static org.junit.Assume.assumeTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Matchers.anyString;
 import static org.mockito.Mockito.doAnswer;
@@ -705,6 +706,10 @@ public class TestDataNodeHotSwapVolumes {
   public void testDirectlyReloadAfterCheckDiskError()
   throws IOException, TimeoutException, InterruptedException,
   ReconfigurationException {
+// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+// volume failures which is currently not supported on Windows.
+assumeTrue(!Path.WINDOWS);
+
 startDFSCluster(1, 2);
 createFile(new Path(/test), 32, (short)2);
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a319771d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index 0a90947..0d158c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -207,8 +207,12 @@ public class TestDataNodeVolumeFailure {
* after failure.
*/
   @Test(timeout=15)
-  public void testFailedVolumeBeingRemovedFromDataNode()
+public void testFailedVolumeBeingRemovedFromDataNode()
   throws InterruptedException, IOException, TimeoutException {
+// The test uses DataNodeTestUtils#injectDataDirFailure() to simulate
+// volume failures which is currently not supported on Windows.
+assumeTrue(!Path.WINDOWS);
+
 Path file1 = new Path("/test1");
 DFSTestUtil.createFile(fs, file1, 1024, (short) 2, 1L);
 DFSTestUtil.waitReplication(fs, file1, (short) 2);
@@ -270,9 +274,8 @@ public class TestDataNodeVolumeFailure 

[29/33] hadoop git commit: MAPREDUCE-6259. IllegalArgumentException due to missing job submit time. Contributed by zhihai xu

2015-05-04 Thread jitendra
MAPREDUCE-6259. IllegalArgumentException due to missing job submit time. 
Contributed by zhihai xu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf70c5ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf70c5ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf70c5ae

Branch: refs/heads/HDFS-7240
Commit: bf70c5ae2824a9139c1aa9d7c14020018881cec2
Parents: 3fe79e1
Author: Jason Lowe jl...@apache.org
Authored: Mon May 4 20:39:18 2015 +
Committer: Jason Lowe jl...@apache.org
Committed: Mon May 4 20:39:18 2015 +

--
 hadoop-mapreduce-project/CHANGES.txt|  3 ++
 .../jobhistory/JobHistoryEventHandler.java  | 15 --
 .../hadoop/mapreduce/v2/app/MRAppMaster.java|  4 +-
 .../jobhistory/TestJobHistoryEventHandler.java  | 57 +---
 .../mapreduce/jobhistory/AMStartedEvent.java| 16 --
 5 files changed, 77 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf70c5ae/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index f7e3bde..481757a 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -397,6 +397,9 @@ Release 2.7.1 - UNRELEASED
 MAPREDUCE-6339. Job history file is not flushed correctly because 
isTimerActive 
 flag is not set true when flushTimerTask is scheduled. (zhihai xu via 
devaraj)
 
+MAPREDUCE-6259. IllegalArgumentException due to missing job submit time
+(zhihai xu via jlowe)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf70c5ae/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
index 6b0ea79..bf32888 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java
@@ -426,10 +426,10 @@ public class JobHistoryEventHandler extends 
AbstractService
* This should be the first call to history for a job
* 
* @param jobId the jobId.
-   * @param forcedJobStateOnShutDown
+   * @param amStartedEvent
* @throws IOException
*/
-  protected void setupEventWriter(JobId jobId, String forcedJobStateOnShutDown)
+  protected void setupEventWriter(JobId jobId, AMStartedEvent amStartedEvent)
   throws IOException {
 if (stagingDirPath == null) {
   LOG.error("Log Directory is null, returning");
@@ -489,8 +489,13 @@ public class JobHistoryEventHandler extends AbstractService
 }
 
 MetaInfo fi = new MetaInfo(historyFile, logDirConfPath, writer,
-user, jobName, jobId, forcedJobStateOnShutDown, queueName);
+user, jobName, jobId, amStartedEvent.getForcedJobStateOnShutDown(),
+queueName);
 fi.getJobSummary().setJobId(jobId);
+fi.getJobSummary().setJobLaunchTime(amStartedEvent.getStartTime());
+fi.getJobSummary().setJobSubmitTime(amStartedEvent.getSubmitTime());
+fi.getJobIndexInfo().setJobStartTime(amStartedEvent.getStartTime());
+fi.getJobIndexInfo().setSubmitTime(amStartedEvent.getSubmitTime());
 fileMap.put(jobId, fi);
   }
 
@@ -541,8 +546,7 @@ public class JobHistoryEventHandler extends AbstractService
 try {
   AMStartedEvent amStartedEvent =
   (AMStartedEvent) event.getHistoryEvent();
-  setupEventWriter(event.getJobID(),
-  amStartedEvent.getForcedJobStateOnShutDown());
+  setupEventWriter(event.getJobID(), amStartedEvent);
 } catch (IOException ioe) {
   LOG.error("Error JobHistoryEventHandler in handleEvent: " + event,
   ioe);
@@ -982,6 +986,7 @@ public class JobHistoryEventHandler extends AbstractService
 tEvent.addEventInfo("NODE_MANAGER_HTTP_PORT",
 ase.getNodeManagerHttpPort());
 tEvent.addEventInfo("START_TIME", ase.getStartTime());
+tEvent.addEventInfo("SUBMIT_TIME", ase.getSubmitTime());
 tEntity.addEvent(tEvent);
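
The refactor above passes the whole AMStartedEvent into setupEventWriter instead of one extracted field, so the submit and launch times reach the job summary without another signature change. A standalone sketch of that pattern (hypothetical names, not the MR history types):

class AmStartedSketch {
  final long submitTime;
  final long startTime;

  AmStartedSketch(long submitTime, long startTime) {
    this.submitTime = submitTime;
    this.startTime = startTime;
  }
}

public class EventWriterSketch {
  static void setupEventWriter(String jobId, AmStartedSketch event) {
    // With the full event in hand, new fields cost no signature churn.
    System.out.println(jobId + ": submit=" + event.submitTime
        + " launch=" + event.startTime);
  }

  public static void main(String[] args) {
    long now = System.currentTimeMillis();
    setupEventWriter("job_0001", new AmStartedSketch(now - 500L, now));
  }
}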
 

[16/33] hadoop git commit: HDFS-8249. Separate HdfsConstants into the client and the server side class. Contributed by Haohui Mai.

2015-05-04 Thread jitendra
http://git-wip-us.apache.org/repos/asf/hadoop/blob/6ae2a0d0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
index d26a9a5..09a2d8b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
@@ -24,8 +24,8 @@ import com.google.common.collect.Lists;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.HardLink;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ -267,7 +267,7 @@ public class BlockPoolSliceStorage extends Storage {
 LOG.info("Formatting block pool " + blockpoolID + " directory "
 + bpSdir.getCurrentDir());
 bpSdir.clearDirectory(); // create directory
-this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
+this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
 this.cTime = nsInfo.getCTime();
 this.namespaceID = nsInfo.getNamespaceID();
 this.blockpoolID = nsInfo.getBlockPoolID();
@@ -361,7 +361,7 @@ public class BlockPoolSliceStorage extends Storage {
 }
 readProperties(sd);
 checkVersionUpgradable(this.layoutVersion);
-assert this.layoutVersion >= HdfsConstants.DATANODE_LAYOUT_VERSION 
+assert this.layoutVersion >= HdfsServerConstants.DATANODE_LAYOUT_VERSION
    : "Future version is not allowed";
 if (getNamespaceID() != nsInfo.getNamespaceID()) {
   throw new IOException("Incompatible namespaceIDs in "
@@ -375,17 +375,17 @@ public class BlockPoolSliceStorage extends Storage {
   + nsInfo.getBlockPoolID() + "; datanode blockpoolID = "
   + blockpoolID);
 }
-if (this.layoutVersion == HdfsConstants.DATANODE_LAYOUT_VERSION
+if (this.layoutVersion == HdfsServerConstants.DATANODE_LAYOUT_VERSION
 && this.cTime == nsInfo.getCTime()) {
   return; // regular startup
 }
-if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION) {
+if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION) {
   int restored = restoreBlockFilesFromTrash(getTrashRootDir(sd));
   LOG.info("Restored " + restored + " block files from trash " +
 "before the layout upgrade. These blocks will be moved to " +
 "the previous directory during the upgrade");
 }
-if (this.layoutVersion > HdfsConstants.DATANODE_LAYOUT_VERSION
+if (this.layoutVersion > HdfsServerConstants.DATANODE_LAYOUT_VERSION
 || this.cTime < nsInfo.getCTime()) {
   doUpgrade(datanode, sd, nsInfo); // upgrade
   return;
@@ -425,7 +425,7 @@ public class BlockPoolSliceStorage extends Storage {
 }
 LOG.info("Upgrading block pool storage directory " + bpSd.getRoot()
 + ".\n   old LV = " + this.getLayoutVersion() + "; old CTime = "
-+ this.getCTime() + ".\n   new LV = " + 
HdfsConstants.DATANODE_LAYOUT_VERSION
++ this.getCTime() + ".\n   new LV = " + 
HdfsServerConstants.DATANODE_LAYOUT_VERSION
 + "; new CTime = " + nsInfo.getCTime());
 // get SD/previous directory
 String dnRoot = getDataNodeStorageRoot(bpSd.getRoot().getCanonicalPath());
@@ -454,7 +454,7 @@ public class BlockPoolSliceStorage extends Storage {
 
 // 3. Create new SD/current with block files hardlinks and VERSION
 linkAllBlocks(datanode, bpTmpDir, bpCurDir);
-this.layoutVersion = HdfsConstants.DATANODE_LAYOUT_VERSION;
+this.layoutVersion = HdfsServerConstants.DATANODE_LAYOUT_VERSION;
 assert this.namespaceID == nsInfo.getNamespaceID() 
 : "Data-node and name-node layout versions must be the same.";
 this.cTime = nsInfo.getCTime();
@@ -563,13 +563,13 @@ public class BlockPoolSliceStorage extends Storage {
 // the namespace state or can be further upgraded to it.
 // In another word, we can only roll back when ( storedLV >= software LV)
 // && ( DN.previousCTime <= NN.ctime)
-if (!(prevInfo.getLayoutVersion() <= HdfsConstants.DATANODE_LAYOUT_VERSION && 
+if (!(prevInfo.getLayoutVersion() <= 
HdfsServerConstants.DATANODE_LAYOUT_VERSION &&
 prevInfo.getCTime() <= nsInfo.getCTime())) { // cannot rollback
   throw new InconsistentFSStateException(bpSd.getRoot(),
   

[26/33] hadoop git commit: HDFS-8290. WebHDFS calls before namesystem initialization can cause NullPointerException. Contributed by Chris Nauroth.

2015-05-04 Thread jitendra
HDFS-8290. WebHDFS calls before namesystem initialization can cause 
NullPointerException. Contributed by Chris Nauroth.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c4578760
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c4578760
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c4578760

Branch: refs/heads/HDFS-7240
Commit: c4578760b67d5b5169949a1b059f4472a268ff1b
Parents: 8f65c79
Author: cnauroth cnaur...@apache.org
Authored: Mon May 4 11:35:04 2015 -0700
Committer: cnauroth cnaur...@apache.org
Committed: Mon May 4 11:35:04 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +++
 .../web/resources/NamenodeWebHdfsMethods.java   |  7 ++-
 .../web/resources/TestWebHdfsDataLocality.java  | 20 +++-
 3 files changed, 28 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4578760/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e525800..a365b86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -610,6 +610,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8309. Skip unit test using DataNodeTestUtils#injectDataDirFailure() 
on Windows.
 (xyao)
 
+HDFS-8290. WebHDFS calls before namesystem initialization can cause
+NullPointerException. (cnauroth)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4578760/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 2c1148e..d33721c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -69,6 +69,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.web.JsonUtil;
@@ -164,7 +165,11 @@ public class NamenodeWebHdfsMethods {
   static DatanodeInfo chooseDatanode(final NameNode namenode,
   final String path, final HttpOpParam.Op op, final long openOffset,
   final long blocksize, final String excludeDatanodes) throws IOException {
-final BlockManager bm = namenode.getNamesystem().getBlockManager();
+FSNamesystem fsn = namenode.getNamesystem();
+if (fsn == null) {
+  throw new IOException("Namesystem has not been intialized yet.");
+}
+final BlockManager bm = fsn.getBlockManager();
 
 HashSet<Node> excludes = new HashSet<Node>();
 if (excludeDatanodes != null) {
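
The null check above fails fast with a descriptive IOException when WebHDFS is called before the namesystem finishes initializing, instead of letting the later dereference surface as a NullPointerException. The guard pattern in isolation (illustrative names only):

import java.io.IOException;

public class InitGuardSketch {
  static Object namesystem = null; // not yet initialized during startup

  static Object getBlockManager() throws IOException {
    Object fsn = namesystem;
    if (fsn == null) {
      // Clear, catchable error instead of an NPE deep in request handling.
      throw new IOException("Namesystem has not been initialized yet.");
    }
    return fsn;
  }

  public static void main(String[] args) {
    try {
      getBlockManager();
    } catch (IOException e) {
      System.out.println("expected: " + e.getMessage());
    }
  }
}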

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c4578760/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
index 077361c..15e1c04 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java
@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.web.resources;
 
+import static org.mockito.Mockito.*;
+
+import java.io.IOException;
 import java.util.Arrays;
 import java.util.List;
 
@@ -42,7 +45,9 @@ import org.apache.hadoop.hdfs.web.resources.PostOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.log4j.Level;
 import org.junit.Assert;
+import 

[01/33] hadoop git commit: HDFS-7281. Missing block is marked as corrupted block (Ming Ma via Yongjun Zhang)

2015-05-04 Thread jitendra
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 1b3b9e5c3 -> d701acc9c


HDFS-7281. Missing block is marked as corrupted block (Ming Ma via Yongjun 
Zhang)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/279958b7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/279958b7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/279958b7

Branch: refs/heads/HDFS-7240
Commit: 279958b772c25e0633bd967828b7d27d5c0a6a56
Parents: 1b3b9e5
Author: Yongjun Zhang yzh...@cloudera.com
Authored: Fri May 1 08:42:00 2015 -0700
Committer: Yongjun Zhang yzh...@cloudera.com
Committed: Fri May 1 08:42:00 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 ++
 .../server/blockmanagement/BlockManager.java|  3 +-
 .../hdfs/server/namenode/NamenodeFsck.java  | 54 +++-
 .../hadoop/hdfs/server/namenode/TestFsck.java   | 23 ++---
 4 files changed, 63 insertions(+), 20 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/279958b7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 3bee852..9accdc0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -20,6 +20,9 @@ Trunk (Unreleased)
 
 HDFS-7985. WebHDFS should be always enabled. (Li Lu via wheat9)
 
+HDFS-7281. Missing block is marked as corrupted block (Ming Ma via
+Yongjun Zhang)
+ 
   NEW FEATURES
 
 HDFS-3125. Add JournalService to enable Journal Daemon. (suresh)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/279958b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1db1356..53ffe0b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -849,7 +849,8 @@ public class BlockManager {
 }
 
 final int numNodes = blocksMap.numNodes(blk);
-final boolean isCorrupt = numCorruptNodes == numNodes;
+final boolean isCorrupt = numCorruptNodes != 0 &&
numCorruptNodes == numNodes;
 final int numMachines = isCorrupt ? numNodes: numNodes - numCorruptNodes;
 final DatanodeStorageInfo[] machines = new 
DatanodeStorageInfo[numMachines];
 int j = 0;
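
The predicate fix matters for blocks with zero replicas: with numNodes and numCorruptNodes both zero, the old equality reported a missing block as corrupt. A standalone demonstration:

public class CorruptFlagDemo {
  public static void main(String[] args) {
    int numCorruptNodes = 0;
    int numNodes = 0; // missing block: no replicas at all
    boolean oldIsCorrupt = numCorruptNodes == numNodes;  // true, wrongly
    boolean newIsCorrupt = numCorruptNodes != 0
        && numCorruptNodes == numNodes;                  // false, as intended
    System.out.println("old=" + oldIsCorrupt + " new=" + newIsCorrupt);
  }
}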

http://git-wip-us.apache.org/repos/asf/hadoop/blob/279958b7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 0cfe31a..ac77394 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -531,6 +531,7 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
 int missing = 0;
 int corrupt = 0;
 long missize = 0;
+long corruptSize = 0;
 int underReplicatedPerFile = 0;
 int misReplicatedPerFile = 0;
 StringBuilder report = new StringBuilder();
@@ -570,10 +571,11 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   // count corrupt blocks
   boolean isCorrupt = lBlk.isCorrupt();
   if (isCorrupt) {
+res.addCorrupt(block.getNumBytes());
 corrupt++;
-res.corruptBlocks++;
-out.print("\n" + path + ": CORRUPT blockpool " + 
block.getBlockPoolId() + 
-" block " + block.getBlockName()+"\n");
+corruptSize += block.getNumBytes();
+out.print("\n" + path + ": CORRUPT blockpool " +
+block.getBlockPoolId() + " block " + block.getBlockName() + "\n");
   }
 
   // count minimally replicated blocks
@@ -619,7 +621,11 @@ public class NamenodeFsck implements 
DataEncryptionKeyFactory {
   // report
   String blkName = block.toString();
  report.append(blockNumber + ". " + blkName + " len=" +
      block.getNumBytes());
-  if (totalReplicasPerBlock == 0) {
+  if (totalReplicasPerBlock == 0 && !isCorrupt) {
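
The fsck hunk above adds byte-level accounting for corrupt blocks alongside the existing block count, and keeps corrupt blocks out of the missing-block branch. A rough sketch of the accumulation pattern, using a hypothetical stand-in for fsck's Result type (not the real class):

    // Hypothetical stand-in for NamenodeFsck's per-run result aggregation.
    class FsckResultSketch {
      long corruptBlocks;
      long corruptSize;

      // Mirrors the res.addCorrupt(block.getNumBytes()) call in the diff:
      // count the block and accumulate its size in one step.
      void addCorrupt(long blockSize) {
        corruptBlocks++;
        corruptSize += blockSize;
      }
    }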

[11/33] hadoop git commit: HADOOP-11491. HarFs incorrectly declared as requiring an authority. (Brahma Reddy Battula via gera)

2015-05-04 Thread jitendra
HADOOP-11491. HarFs incorrectly declared as requiring an authority. (Brahma 
Reddy Battula via gera)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f343f865
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f343f865
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f343f865

Branch: refs/heads/HDFS-7240
Commit: f343f8657e2b01773a32c2c7d960dc368954b42e
Parents: f8204e2
Author: Gera Shegalov g...@apache.org
Authored: Fri May 1 15:44:36 2015 -0700
Committer: Gera Shegalov g...@apache.org
Committed: Fri May 1 18:18:55 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt   | 3 +++
 .../src/main/java/org/apache/hadoop/fs/HarFs.java | 2 +-
 .../java/org/apache/hadoop/fs/TestHarFileSystemBasics.java| 7 +++
 3 files changed, 11 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f343f865/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0a53396..d00e3ef 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -585,6 +585,9 @@ Release 2.8.0 - UNRELEASED
 HADOOP-11866. increase readability and reliability of checkstyle,
 shellcheck, and whitespace reports (aw)
 
+HADOOP-11491. HarFs incorrectly declared as requiring an authority.
+(Brahma Reddy Battula via gera)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f343f865/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFs.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFs.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFs.java
index a2369e3..4f5fde8 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFs.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFs.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 public class HarFs extends DelegateToFileSystem {
   HarFs(final URI theUri, final Configuration conf)
   throws IOException, URISyntaxException {
-super(theUri, new HarFileSystem(), conf, "har", true);
+super(theUri, new HarFileSystem(), conf, "har", false);
   }
 
   @Override
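
The final constructor argument to DelegateToFileSystem is the authorityNeeded flag; flipping it to false lets HarFs accept URIs with no authority component, which is the usual shape of har paths. A hedged usage sketch, assuming a Hadoop client classpath and an archive that actually exists at the illustrative path below:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;

    public class HarFsAuthoritySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // har URIs normally carry no authority; with authorityNeeded=false,
        // resolving one through FileContext no longer fails the authority
        // check. The path is illustrative only.
        URI harUri = URI.create("har:///user/hadoop/archive.har");
        FileContext fc = FileContext.getFileContext(harUri, conf);
        System.out.println("Resolved FileContext for " + harUri);
      }
    }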

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f343f865/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
index 577abfd..53507b9 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java
@@ -398,4 +398,11 @@ public class TestHarFileSystemBasics {
 }
   }
 
+  @Test
+  public void testHarFsWithoutAuthority() throws Exception {
+final URI uri = harFileSystem.getUri();
+Assert.assertNull("har uri authority not null: " + uri,
+    uri.getAuthority());
+FileContext.getFileContext(uri, conf);
+  }
+
 }



[04/33] hadoop git commit: Updating CHANGES.txt - Pulling in HDFS-8091 to branch-2.7 (for 2.7.1)

2015-05-04 Thread jitendra
Updating CHANGES.txt - Pulling in HDFS-8091 to branch-2.7 (for 2.7.1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/33934611
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/33934611
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/33934611

Branch: refs/heads/HDFS-7240
Commit: 3393461197da869c3f1e47888e03670106bda023
Parents: b82567d
Author: Arun Suresh asur...@apache.org
Authored: Fri May 1 11:48:54 2015 -0700
Committer: Arun Suresh asur...@apache.org
Committed: Fri May 1 11:48:54 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/33934611/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 16094a2..5f6b283 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -539,9 +539,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-8096. DatanodeMetrics#blocksReplicated will get incremented early and
 even for failed transfers (vinayakumarb)
 
-HDFS-8091: ACLStatus and XAttributes should be presented to
-INodeAttributesProvider before returning to client (asuresh)
-
 HDFS-7939. Two fsimage_rollback_* files are created which are not deleted
 after rollback. (J.Andreina via vinayakumarb)
 
@@ -660,6 +657,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8269. getBlockLocations() does not resolve the .reserved path and
 generates incorrect edit logs when updating the atime. (wheat9)
 
+HDFS-8091: ACLStatus and XAttributes should be presented to
+INodeAttributesProvider before returning to client (asuresh)
+
 Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES


