[3/3] hadoop git commit: HDFS-8179. DFSClient#getServerDefaults returns null within 1 hour of system start. (Contributed by Xiaoyu Yao)

2015-04-20 Thread arp
HDFS-8179. DFSClient#getServerDefaults returns null within 1 hour of system 
start. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cc42ad03
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cc42ad03
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cc42ad03

Branch: refs/heads/branch-2.7
Commit: cc42ad03d98d460da409ca26749660da19da8064
Parents: b493ec2
Author: Arpit Agarwal a...@apache.org
Authored: Mon Apr 20 15:42:42 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Mon Apr 20 15:42:58 2015 -0700

--
 .../src/main/java/org/apache/hadoop/fs/Trash.java|  5 +
 .../org/apache/hadoop/fs/TrashPolicyDefault.java |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../main/java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +++-
 .../hadoop/hdfs/TestDistributedFileSystem.java   | 15 +++
 5 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc42ad03/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
index 2d5f540..aae5cf7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +34,9 @@ import org.apache.hadoop.conf.Configured;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Trash extends Configured {
+  private static final org.apache.commons.logging.Log LOG =
+  LogFactory.getLog(Trash.class);
+
   private TrashPolicy trashPolicy; // configured trash policy instance
 
   /** 
@@ -84,6 +88,7 @@ public class Trash extends Configured {
} catch (Exception e) {
  // If we can not determine that trash is enabled server side then
  // bail rather than potentially deleting a file when trash is enabled.
+  LOG.warn("Failed to get server trash configuration", e);
  throw new IOException("Failed to get server trash configuration", e);
}
 Trash trash = new Trash(fullyResolvedFs, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc42ad03/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index cfb51e2..d6a9b4b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -134,11 +134,11 @@ public class TrashPolicyDefault extends TrashPolicy {
for (int i = 0; i < 2; i++) {
  try {
if (!fs.mkdirs(baseTrashPath, PERMISSION)) {  // create current
-  LOG.warn("Can't create(mkdir) trash directory: "+baseTrashPath);
+  LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath);
  return false;
}
  } catch (IOException e) {
-LOG.warn("Can't create trash directory: "+baseTrashPath);
+LOG.warn("Can't create trash directory: " + baseTrashPath, e);
cause = e;
break;
  }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cc42ad03/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 77e83d4..b2d2aff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -32,6 +32,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8153. Error Message points to wrong parent directory in case of
 path component name length error (Anu Engineer via jitendra)
 
+HDFS-8179. DFSClient#getServerDefaults returns null within 1
+hour of system start. (Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES


[1/3] hadoop git commit: HDFS-8179. DFSClient#getServerDefaults returns null within 1 hour of system start. (Contributed by Xiaoyu Yao)

2015-04-20 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 756c25429 -> 95a8d452c
  refs/heads/branch-2.7 b493ec28a -> cc42ad03d
  refs/heads/trunk d50e8f092 -> c92f6f360


HDFS-8179. DFSClient#getServerDefaults returns null within 1 hour of system 
start. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c92f6f36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c92f6f36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c92f6f36

Branch: refs/heads/trunk
Commit: c92f6f360515cc21ecb9b9f49b3e59537ef0cb05
Parents: d50e8f0
Author: Arpit Agarwal a...@apache.org
Authored: Mon Apr 20 15:42:42 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Mon Apr 20 15:42:42 2015 -0700

--
 .../src/main/java/org/apache/hadoop/fs/Trash.java|  5 +
 .../org/apache/hadoop/fs/TrashPolicyDefault.java |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../main/java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +++-
 .../hadoop/hdfs/TestDistributedFileSystem.java   | 15 +++
 5 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c92f6f36/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
index 2d5f540..aae5cf7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +34,9 @@ import org.apache.hadoop.conf.Configured;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Trash extends Configured {
+  private static final org.apache.commons.logging.Log LOG =
+  LogFactory.getLog(Trash.class);
+
   private TrashPolicy trashPolicy; // configured trash policy instance
 
   /** 
@@ -84,6 +88,7 @@ public class Trash extends Configured {
} catch (Exception e) {
  // If we can not determine that trash is enabled server side then
  // bail rather than potentially deleting a file when trash is enabled.
+  LOG.warn("Failed to get server trash configuration", e);
  throw new IOException("Failed to get server trash configuration", e);
}
 Trash trash = new Trash(fullyResolvedFs, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c92f6f36/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index cfb51e2..d6a9b4b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -134,11 +134,11 @@ public class TrashPolicyDefault extends TrashPolicy {
for (int i = 0; i < 2; i++) {
  try {
if (!fs.mkdirs(baseTrashPath, PERMISSION)) {  // create current
-  LOG.warn("Can't create(mkdir) trash directory: "+baseTrashPath);
+  LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath);
  return false;
}
  } catch (IOException e) {
-LOG.warn("Can't create trash directory: "+baseTrashPath);
+LOG.warn("Can't create trash directory: " + baseTrashPath, e);
cause = e;
break;
  }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c92f6f36/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 8dec32e..2d20812 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -561,6 +561,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8153. Error Message points to wrong parent directory in case of
 path component name length error (Anu Engineer via jitendra)
 
+HDFS-8179. DFSClient#getServerDefaults returns null within 1
+hour of system start. (Xiaoyu Yao via Arpit Agarwal)
+
 

[2/3] hadoop git commit: HDFS-8179. DFSClient#getServerDefaults returns null within 1 hour of system start. (Contributed by Xiaoyu Yao)

2015-04-20 Thread arp
HDFS-8179. DFSClient#getServerDefaults returns null within 1 hour of system 
start. (Contributed by Xiaoyu Yao)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95a8d452
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95a8d452
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95a8d452

Branch: refs/heads/branch-2
Commit: 95a8d452c58a3e07e1128788abcf03e6e7a8b823
Parents: 756c254
Author: Arpit Agarwal a...@apache.org
Authored: Mon Apr 20 15:42:42 2015 -0700
Committer: Arpit Agarwal a...@apache.org
Committed: Mon Apr 20 15:42:49 2015 -0700

--
 .../src/main/java/org/apache/hadoop/fs/Trash.java|  5 +
 .../org/apache/hadoop/fs/TrashPolicyDefault.java |  4 ++--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  |  3 +++
 .../main/java/org/apache/hadoop/hdfs/DFSClient.java  |  4 +++-
 .../hadoop/hdfs/TestDistributedFileSystem.java   | 15 +++
 5 files changed, 28 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95a8d452/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
index 2d5f540..aae5cf7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Trash.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -33,6 +34,9 @@ import org.apache.hadoop.conf.Configured;
 @InterfaceAudience.Public
 @InterfaceStability.Stable
 public class Trash extends Configured {
+  private static final org.apache.commons.logging.Log LOG =
+  LogFactory.getLog(Trash.class);
+
   private TrashPolicy trashPolicy; // configured trash policy instance
 
   /** 
@@ -84,6 +88,7 @@ public class Trash extends Configured {
} catch (Exception e) {
  // If we can not determine that trash is enabled server side then
  // bail rather than potentially deleting a file when trash is enabled.
+  LOG.warn("Failed to get server trash configuration", e);
  throw new IOException("Failed to get server trash configuration", e);
}
 Trash trash = new Trash(fullyResolvedFs, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95a8d452/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
index cfb51e2..d6a9b4b 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicyDefault.java
@@ -134,11 +134,11 @@ public class TrashPolicyDefault extends TrashPolicy {
for (int i = 0; i < 2; i++) {
  try {
if (!fs.mkdirs(baseTrashPath, PERMISSION)) {  // create current
-  LOG.warn("Can't create(mkdir) trash directory: "+baseTrashPath);
+  LOG.warn("Can't create(mkdir) trash directory: " + baseTrashPath);
  return false;
}
  } catch (IOException e) {
-LOG.warn("Can't create trash directory: "+baseTrashPath);
+LOG.warn("Can't create trash directory: " + baseTrashPath, e);
cause = e;
break;
  }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95a8d452/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1acb906..0da2516 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -243,6 +243,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8153. Error Message points to wrong parent directory in case of
 path component name length error (Anu Engineer via jitendra)
 
+HDFS-8179. DFSClient#getServerDefaults returns null within 1
+hour of system start. (Xiaoyu Yao via Arpit Agarwal)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES


hadoop git commit: YARN-3463. Integrate OrderingPolicy Framework with CapacityScheduler. (Craig Welch via wangda)

2015-04-20 Thread wangda
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 95a8d452c -> d0ea982e6


YARN-3463. Integrate OrderingPolicy Framework with CapacityScheduler. (Craig 
Welch via wangda)

(cherry picked from commit 44872b76fcc0ddfbc7b0a4e54eef50fe8708e0f5)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d0ea982e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d0ea982e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d0ea982e

Branch: refs/heads/branch-2
Commit: d0ea982e6428089bca5fa82923b128414a5459ff
Parents: 95a8d45
Author: Wangda Tan wan...@apache.org
Authored: Mon Apr 20 17:12:32 2015 -0700
Committer: Wangda Tan wan...@apache.org
Committed: Mon Apr 20 17:13:01 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt |   3 +
 .../ProportionalCapacityPreemptionPolicy.java   |   5 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  24 -
 .../CapacitySchedulerConfiguration.java |  31 +-
 .../scheduler/capacity/LeafQueue.java   |  63 +---
 .../AbstractComparatorOrderingPolicy.java   |  13 +--
 .../scheduler/policy/FifoOrderingPolicy.java|   7 +-
 .../scheduler/policy/OrderingPolicy.java|   4 +-
 .../webapp/CapacitySchedulerPage.java   |   1 +
 .../dao/CapacitySchedulerLeafQueueInfo.java |   9 ++
 ...estProportionalCapacityPreemptionPolicy.java |  14 ++-
 .../capacity/TestApplicationLimits.java |  15 +--
 .../scheduler/capacity/TestLeafQueue.java   | 102 ++-
 13 files changed, 242 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0ea982e/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ef449a5..67da106 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -39,6 +39,9 @@ Release 2.8.0 - UNRELEASED
 YARN-1402. Update related Web UI and CLI with exposing client API to check
 log aggregation status. (Xuan Gong via junping_du)
 
+YARN-3463. Integrate OrderingPolicy Framework with CapacityScheduler.
+(Craig Welch via wangda)
+
   IMPROVEMENTS
 
 YARN-1880. Cleanup TestApplicationClientProtocolOnHA

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0ea982e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
index 87a2a00..2ab4197 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
@@ -550,9 +550,8 @@ public class ProportionalCapacityPreemptionPolicy 
implements SchedulingEditPolic
 
 // lock the leafqueue while we scan applications and unreserve
 synchronized (qT.leafQueue) {
-  NavigableSet<FiCaSchedulerApp> ns = 
-  (NavigableSet<FiCaSchedulerApp>) qT.leafQueue.getApplications();
-  Iterator<FiCaSchedulerApp> desc = ns.descendingIterator();
+  Iterator<FiCaSchedulerApp> desc =   
+qT.leafQueue.getOrderingPolicy().getPreemptionIterator();
   qT.actuallyPreempted = Resources.clone(resToObtain);
   while (desc.hasNext()) {
 FiCaSchedulerApp fc = desc.next();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d0ea982e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java
 

hadoop git commit: HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.

2015-04-20 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 4d0b3c51f -> 44a214858


HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44a21485
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44a21485
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44a21485

Branch: refs/heads/HDFS-7285
Commit: 44a214858a60071e1de72c7f1ea8524edbeed10f
Parents: 4d0b3c5
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Mon Apr 20 17:42:02 2015 -0700
Committer: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Committed: Mon Apr 20 17:42:02 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../hadoop/hdfs/util/StripedBlockUtil.java  |  61 ---
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 178 +++
 3 files changed, 100 insertions(+), 142 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44a21485/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index c8dbf08..8f28285 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -104,3 +104,6 @@
 
 HDFS-8181. createErasureCodingZone sets retryCache state as false always
 (Uma Maheswara Rao G via vinayakumarb)
+
+HDFS-8190. StripedBlockUtil.getInternalBlockLength may have overflow error.
+(szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44a21485/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
index 2368021..d622d4d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/StripedBlockUtil.java
@@ -25,6 +25,8 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 
+import com.google.common.base.Preconditions;
+
 /**
  * Utility class for analyzing striped block groups
  */
@@ -81,46 +83,43 @@ public class StripedBlockUtil {
   /**
* Get the size of an internal block at the given index of a block group
*
-   * @param numBytesInGroup Size of the block group only counting data blocks
+   * @param dataSize Size of the block group only counting data blocks
* @param cellSize The size of a striping cell
-   * @param dataBlkNum The number of data blocks
-   * @param idxInGroup The logical index in the striped block group
+   * @param numDataBlocks The number of data blocks
+   * @param i The logical index in the striped block group
* @return The size of the internal block at the specified index
*/
-  public static long getInternalBlockLength(long numBytesInGroup,
-  int cellSize, int dataBlkNum, int idxInGroup) {
+  public static long getInternalBlockLength(long dataSize,
+  int cellSize, int numDataBlocks, int i) {
+Preconditions.checkArgument(dataSize >= 0);
+Preconditions.checkArgument(cellSize > 0);
+Preconditions.checkArgument(numDataBlocks > 0);
+Preconditions.checkArgument(i >= 0);
 // Size of each stripe (only counting data blocks)
-final long numBytesPerStripe = cellSize * dataBlkNum;
-assert numBytesPerStripe > 0:
-"getInternalBlockLength should only be called on valid striped blocks";
+final int stripeSize = cellSize * numDataBlocks;
 // If block group ends at stripe boundary, each internal block has an equal
 // share of the group
-if (numBytesInGroup % numBytesPerStripe == 0) {
-  return numBytesInGroup / dataBlkNum;
+final int lastStripeDataLen = (int)(dataSize % stripeSize);
+if (lastStripeDataLen == 0) {
+  return dataSize / numDataBlocks;
 }
 
-int numStripes = (int) ((numBytesInGroup - 1) / numBytesPerStripe + 1);
-assert numStripes >= 1 : "There should be at least 1 stripe";
-
-// All stripes but the last one are full stripes. The block should at least
-// contain (numStripes - 1) full cells.
-long blkSize = (numStripes - 1) * cellSize;
-
-long lastStripeLen = numBytesInGroup % numBytesPerStripe;
-// Size of parity cells should equal the size of the first cell, if it
-// is not full.
-long 

hadoop git commit: HDFS-8188. Erasure coding: refactor client-related code to sync with HDFS-8082 and HDFS-8169. Contributed by Zhe Zhang.

2015-04-20 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 bf2f940da -> 4d0b3c51f


HDFS-8188. Erasure coding: refactor client-related code to sync with HDFS-8082 
and HDFS-8169. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4d0b3c51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4d0b3c51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4d0b3c51

Branch: refs/heads/HDFS-7285
Commit: 4d0b3c51f97e821d325334d2ce805d4b05b19c1c
Parents: bf2f940
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 20 14:19:12 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 14:19:12 2015 -0700

--
 .../hdfs/client/HdfsClientConfigKeys.java   | 12 
 .../hdfs/protocol/LocatedStripedBlock.java  | 64 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 21 ++
 .../hadoop/hdfs/client/impl/DfsClientConf.java  | 21 +-
 .../hdfs/protocol/LocatedStripedBlock.java  | 73 
 .../server/blockmanagement/BlockManager.java| 25 ---
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../server/namenode/TestStripedINodeFile.java   |  3 +-
 8 files changed, 120 insertions(+), 101 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d0b3c51/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
index f9965b4..fd7b832 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java
@@ -169,6 +169,18 @@ public interface HdfsClientConfigKeys {
 int THREADPOOL_SIZE_DEFAULT = 0;
   }
 
+  /** dfs.client.read.striped configuration properties */
+  interface StripedRead {
+String PREFIX = Read.PREFIX + "striped.";
+
+String  THREADPOOL_SIZE_KEY = PREFIX + "threadpool.size";
+/**
+ * With default 6+3 schema, each normal read could span 6 DNs. So this
+ * default value accommodates 3 read streams
+ */
+int THREADPOOL_SIZE_DEFAULT = 18;
+  }
+
   /** dfs.http.client configuration properties */
   interface HttpClient {
 String  PREFIX = "dfs.http.client.";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4d0b3c51/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
new file mode 100644
index 000..93a5948
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedStripedBlock.java
@@ -0,0 +1,64 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.protocol;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.StorageType;
+
+import java.util.Arrays;
+
+/**
+ * {@link LocatedBlock} with striped block support. For a striped block, each
+ * datanode storage is associated with a block in the block group. We need to
+ * record the index (in the striped block group) for each of them.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class LocatedStripedBlock extends LocatedBlock {
+  private int[] blockIndices;
+
+  public LocatedStripedBlock(ExtendedBlock b, DatanodeInfo[] locs,
+  String[] storageIDs, StorageType[] 

hadoop git commit: MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options which is a regression from MR1 (zxu via rkanter)

2015-04-20 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 6b454aacc -> 756c25429


MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options which is 
a regression from MR1 (zxu via rkanter)

(cherry picked from commit d50e8f09287deeb51012d08e326a2ed71a6da869)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/756c2542
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/756c2542
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/756c2542

Branch: refs/heads/branch-2
Commit: 756c2542930756fef1cbff82056b418070f8d55f
Parents: 6b454aa
Author: Robert Kanter rkan...@apache.org
Authored: Mon Apr 20 14:14:08 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Mon Apr 20 14:14:49 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../mapred/LocalDistributedCacheManager.java|  6 --
 .../hadoop/mapreduce/JobResourceUploader.java   |  2 +-
 .../hadoop/mapred/TestLocalJobSubmission.java   | 92 
 4 files changed, 96 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/756c2542/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index d52527c..9c42b3d 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -89,6 +89,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL
 (rchiang via rkanter)
 
+MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
+which is a regression from MR1 (zxu via rkanter)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/756c2542/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
index 1055516..8606ede 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
@@ -100,18 +100,12 @@ class LocalDistributedCacheManager {
 Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
 if (archiveClassPaths != null) {
   for (Path p : archiveClassPaths) {
-FileSystem remoteFS = p.getFileSystem(conf);
-p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
-remoteFS.getWorkingDirectory()));
 classpaths.put(p.toUri().getPath().toString(), p);
   }
 }
 Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
 if (fileClassPaths != null) {
   for (Path p : fileClassPaths) {
-FileSystem remoteFS = p.getFileSystem(conf);
-p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
-remoteFS.getWorkingDirectory()));
 classpaths.put(p.toUri().getPath().toString(), p);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/756c2542/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index eebdf88..134de35 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -127,7 +127,7 @@ class JobResourceUploader {
 Path tmp = new Path(tmpjars);
 Path newPath = copyRemoteFiles(libjarsDir, tmp, conf, replication);
 DistributedCache.addFileToClassPath(
-new Path(newPath.toUri().getPath()), conf);
+new Path(newPath.toUri().getPath()), conf, jtFs);

hadoop git commit: MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options which is a regression from MR1 (zxu via rkanter)

2015-04-20 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk f967fd2f2 - d50e8f092


MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options which is 
a regression from MR1 (zxu via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d50e8f09
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d50e8f09
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d50e8f09

Branch: refs/heads/trunk
Commit: d50e8f09287deeb51012d08e326a2ed71a6da869
Parents: f967fd2
Author: Robert Kanter rkan...@apache.org
Authored: Mon Apr 20 14:14:08 2015 -0700
Committer: Robert Kanter rkan...@apache.org
Committed: Mon Apr 20 14:14:08 2015 -0700

--
 hadoop-mapreduce-project/CHANGES.txt|  3 +
 .../mapred/LocalDistributedCacheManager.java|  6 --
 .../hadoop/mapreduce/JobResourceUploader.java   |  2 +-
 .../hadoop/mapred/TestLocalJobSubmission.java   | 92 
 4 files changed, 96 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d50e8f09/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index c81868d..a02ae84 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -334,6 +334,9 @@ Release 2.8.0 - UNRELEASED
 MAPREDUCE-6266. Job#getTrackingURL should consistently return a proper URL
 (rchiang via rkanter)
 
+MAPREDUCE-6238. MR2 can't run local jobs with -libjars command options
+which is a regression from MR1 (zxu via rkanter)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d50e8f09/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
index 1055516..8606ede 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
@@ -100,18 +100,12 @@ class LocalDistributedCacheManager {
 Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
 if (archiveClassPaths != null) {
   for (Path p : archiveClassPaths) {
-FileSystem remoteFS = p.getFileSystem(conf);
-p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
-remoteFS.getWorkingDirectory()));
 classpaths.put(p.toUri().getPath().toString(), p);
   }
 }
 Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
 if (fileClassPaths != null) {
   for (Path p : fileClassPaths) {
-FileSystem remoteFS = p.getFileSystem(conf);
-p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
-remoteFS.getWorkingDirectory()));
 classpaths.put(p.toUri().getPath().toString(), p);
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d50e8f09/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
index eebdf88..134de35 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobResourceUploader.java
@@ -127,7 +127,7 @@ class JobResourceUploader {
 Path tmp = new Path(tmpjars);
 Path newPath = copyRemoteFiles(libjarsDir, tmp, conf, replication);
 DistributedCache.addFileToClassPath(
-new Path(newPath.toUri().getPath()), conf);
+new Path(newPath.toUri().getPath()), conf, jtFs);
   }
 }
 


hadoop git commit: Set the release date for 2.7.0

2015-04-20 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 961051e56 - 27fc4fb99


Set the release date for 2.7.0

(cherry picked from commit d52de61544060a04a273114f55979e7e929a576c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/27fc4fb9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/27fc4fb9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/27fc4fb9

Branch: refs/heads/branch-2
Commit: 27fc4fb998c77d1774a933efd09b1cadfb8f5583
Parents: 961051e
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Apr 20 20:16:58 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Mon Apr 20 20:18:01 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/27fc4fb9/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 83fc469..8e45588 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -90,7 +90,7 @@ Release 2.7.1 - UNRELEASED
 
   BUG FIXES
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27fc4fb9/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 122c23a..5c5e59f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -246,7 +246,7 @@ Release 2.7.1 - UNRELEASED
 HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
 goes for infinite loop (vinayakumarb)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27fc4fb9/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 9c42b3d..ef0095c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -106,7 +106,7 @@ Release 2.7.1 - UNRELEASED
 
 MAPREDUCE-6300. Task list sort by task id broken. (Siqi Li via aajisaka)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/27fc4fb9/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 67da106..446f893 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -205,7 +205,7 @@ Release 2.7.1 - UNRELEASED
 YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf
 without making a copy. (Jason Lowe via jianhe)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 



hadoop git commit: Set the release date for 2.7.0

2015-04-20 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 1c8d54bc0 - 7dd5f42a7


Set the release date for 2.7.0

(cherry picked from commit d52de61544060a04a273114f55979e7e929a576c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7dd5f42a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7dd5f42a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7dd5f42a

Branch: refs/heads/branch-2.7
Commit: 7dd5f42a76453d83d1ccfeb350f517cb08aea6f0
Parents: 1c8d54b
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Apr 20 20:16:58 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Mon Apr 20 20:18:38 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dd5f42a/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index 0b86b1f..0e7cbbe 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -12,7 +12,7 @@ Release 2.7.1 - UNRELEASED
 
   BUG FIXES
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dd5f42a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index a169e87..fb7e7e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -38,7 +38,7 @@ Release 2.7.1 - UNRELEASED
 HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
 goes for infinite loop (vinayakumarb)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dd5f42a/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 8624806..82c3064 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -14,7 +14,7 @@ Release 2.7.1 - UNRELEASED
 
 MAPREDUCE-6300. Task list sort by task id broken. (Siqi Li via aajisaka)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7dd5f42a/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index ea58a31..546c6a6 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -20,7 +20,7 @@ Release 2.7.1 - UNRELEASED
 YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf
 without making a copy. (Jason Lowe via jianhe)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 



[Hadoop Wiki] Trivial Update of HowToRelease by VinodKumarVavilapalli

2015-04-20 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The HowToRelease page has been changed by VinodKumarVavilapalli:
https://wiki.apache.org/hadoop/HowToRelease?action=diff&rev1=68&rev2=69

   1. Commit the changes {{{
  svn ci -m "Publishing the bits for release ${version}"
  }}}
-   1. In [[https://repository.apache.org|Nexus]], effect the release of 
artifacts by right-clicking the staged repository and select {{{Release}}}
+   1. In [[https://repository.apache.org|Nexus]], effect the release of 
artifacts by selecting the staged repository and then clicking {{{Release}}}
1. Wait 24 hours for release to propagate to mirrors.
1. Edit the website.
   1. Checkout the website if you haven't already {{{


hadoop git commit: HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop (Contributed by Vinayakumar B) Updated CHANGES.TXT for correct version

2015-04-20 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 44872b76f - ed4137ceb


HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes 
for infinite loop (Contributed by Vinayakumar B)
Updated CHANGES.TXT for correct version


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ed4137ce
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ed4137ce
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ed4137ce

Branch: refs/heads/trunk
Commit: ed4137cebf27717e9c79eae515b0b83ab6676465
Parents: 44872b7
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 21 07:59:43 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Apr 21 07:59:43 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ed4137ce/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2d20812..2291855 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -488,9 +488,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-7922. ShortCircuitCache#close is not releasing
 ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
 
-HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
-goes for infinite loop (vinayakumarb)
-
 HDFS-5215. dfs.datanode.du.reserved is not considered while computing
 available space ( Brahma Reddy Battula via Yongjun Zhang)
 
@@ -564,6 +561,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8179. DFSClient#getServerDefaults returns null within 1
 hour of system start. (Xiaoyu Yao via Arpit Agarwal)
 
+HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
+goes for infinite loop (vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop (Contributed by Vinayakumar B) Updated CHANGES.TXT for correct version

2015-04-20 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 d0ea982e6 - 961051e56


HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes 
for infinite loop (Contributed by Vinayakumar B)
Updated CHANGES.TXT for correct version

(cherry picked from commit ed4137cebf27717e9c79eae515b0b83ab6676465)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/961051e5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/961051e5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/961051e5

Branch: refs/heads/branch-2
Commit: 961051e569151d68d90f91055c8678c034c20207
Parents: d0ea982
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 21 07:59:43 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Apr 21 08:00:37 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/961051e5/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 0da2516..122c23a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -173,9 +173,6 @@ Release 2.8.0 - UNRELEASED
 HDFS-7922. ShortCircuitCache#close is not releasing
 ScheduledThreadPoolExecutors (Rakesh R via Colin P. McCabe)
 
-HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
-goes for infinite loop (vinayakumarb)
-
 HDFS-5215. dfs.datanode.du.reserved is not considered while computing
 available space ( Brahma Reddy Battula via Yongjun Zhang)
 
@@ -246,6 +243,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8179. DFSClient#getServerDefaults returns null within 1
 hour of system start. (Xiaoyu Yao via Arpit Agarwal)
 
+HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
+goes for infinite loop (vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES



hadoop git commit: Set the release date for 2.7.0

2015-04-20 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7.0 bb6984423 - f95b390df


Set the release date for 2.7.0


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f95b390d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f95b390d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f95b390d

Branch: refs/heads/branch-2.7.0
Commit: f95b390df2ca7d599f0ad82cf6e8d980469e7abb
Parents: bb698442
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Apr 20 20:09:52 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Mon Apr 20 20:09:52 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f95b390d/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index dc94e74..eb02070 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop Change Log
 
-Release 2.7.0 - 2015-04-10
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f95b390d/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 90e0982..356f752 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop HDFS Change Log
 
-Release 2.7.0 - 2015-04-10
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f95b390d/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index 6649741..232f26c 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop MapReduce Change Log
 
-Release 2.7.0 - 2015-04-10
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f95b390d/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 272fd3c..27f1a19 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -1,6 +1,6 @@
 Hadoop YARN Change Log
 
-Release 2.7.0 - 2015-04-10
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 



svn commit: r8663 - /release/hadoop/common/KEYS

2015-04-20 Thread wang
Author: wang
Date: Tue Apr 21 01:37:28 2015
New Revision: 8663

Log:
Add code signing key for Andrew Wang (w...@apache.org)

Modified:
release/hadoop/common/KEYS

Modified: release/hadoop/common/KEYS
==
--- release/hadoop/common/KEYS (original)
+++ release/hadoop/common/KEYS Tue Apr 21 01:37:28 2015
@@ -3021,3 +3021,120 @@ p4L1HcGUeZhFh48oS3BCjcjT/WNowzJ0qUf7urnr
 7c1QzhwKXnnaKhz5qkZDPhm7x6ke8JfkwZjrboilUVMTktSc3Gpao+IWBwY=
 =RKMK
 -END PGP PUBLIC KEY BLOCK-
+pub   4096R/7501105C 2015-03-19
+  Key fingerprint = 4A6A C5C6 75B6 1556 8272  9C9E 08D5 1A0A 7501 105C
+uid  Andrew Wang (CODE SIGNING KEY) andrew.w...@cloudera.com
+sig 37501105C 2015-03-20  Andrew Wang (CODE SIGNING KEY) 
andrew.w...@cloudera.com
+sig  E72E74D3 2015-03-20  Andrew Wang andrew.w...@cloudera.com
+sig  AEC77EAF 2015-03-20  Todd Lipcon t...@lipcon.org
+sig  789138DF 2015-03-20  Aaron T. Myers (CODE SIGNING KEY) 
a...@apache.org
+uid  Andrew Wang (CODE SIGNING KEY) w...@apache.org
+sig 37501105C 2015-03-19  Andrew Wang (CODE SIGNING KEY) 
andrew.w...@cloudera.com
+sig  E72E74D3 2015-03-20  Andrew Wang andrew.w...@cloudera.com
+sig  AEC77EAF 2015-03-20  Todd Lipcon t...@lipcon.org
+sig  789138DF 2015-03-20  Aaron T. Myers (CODE SIGNING KEY) 
a...@apache.org
+sub   4096R/AA78AE07 2015-03-19
+sig  7501105C 2015-03-19  Andrew Wang (CODE SIGNING KEY) 
andrew.w...@cloudera.com
+
+-BEGIN PGP PUBLIC KEY BLOCK-
+Version: GnuPG v1.4.11 (GNU/Linux)
+
+mQINBFULYo0BEACzOV8r0sAxOdsvFVo2RwlMo22hD0niQYp23PHwSEai3XGV2Ldd
+opEG21z+YkwM4IXErkwdNpKRz/XMhA7PciKhC4deWnUrGbneA8619vuVN9U5Kh2i
+QREdSQv3Jbvpi3cDA3fVLdy/MK7Gckk8RM/p2QtxhyGcWP4EOg/9csymU62ZSkLM
+8uGLHb0ndUxDC8y7YB9RtCqOGlbm5QxdMKc5mOeVq75pUONZ35jIScU7FSjbRmBo
+QQPVljZXWGf1PYgmTkKyxhMd2cqVP3tT4rzx9GKE5/7SpE2Y0+5wL3LwRaR/94fv
+722TvfJIWfu6ktSo1fZFK+INJJkSvJkkEGimwA+CEWAWqABYUiFdPLutE7q7c43x
+7J7Mkbq0xnJQrt044+guySc6YDPVwPwNXf0Guh6QuL0Huvyjg4GD032YS8GF6uF5
+DpNX1aiiQq8fLv8SPP0Z0LF2F9hBGqRbhh3kBvYbixwC1xm8e5Vg9JN56hf8UJok
+emczy1tiKJYekHoEr+rJmgQlZ3PMfXqdn8OspvjzoJqONx+NK4DZA6AdGx3eKv/Q
+SwCF6H44ocnKaouzP52cMWqbCseq1IgNJa8GRctzjtwkaIqt23VxLLCZJd/BxODV
+TedsRFzm7OEUU5Pvg3PIscLzTkMnD98PIEHofBv55vrXQHNm9dUOF4hxlQARAQAB
+tDlBbmRyZXcgV2FuZyAoQ09ERSBTSUdOSU5HIEtFWSkgPGFuZHJldy53YW5nQGNs
+b3VkZXJhLmNvbT6JAjcEEwEKACEFAlULaIECGwMFCwkIBwMFFQoJCAsFFgIDAQAC
+HgECF4AACgkQCNUaCnUBEFwiqw/+KktbpggKrp8FRAXH6AfI3N67x0VCzTW3QvQi
+X5eHhgYv1CBPA6yYC/cTSdskpVSMCTZ0lLfjuNO7fLn7oUmzvvCDTkb3+f2Dbn2O
+p1gr7nd1z2UmEHExo2LhXhuZro2Tt+80ncs5HtFNmVUz3t4+/qjb5ryCW6LSMecm
+Hi4U1eicys0+i/6RaWnJRQlbjumYcg2B8TwkWfhMAtOyrRtCCddBD77fSDAhgah5
+DMEfHTtobEiSyOEdy82FDleU8ZC2RW8wYD59pGO1UTJTJMLW3ov3u5sF6pCe/NCV
+c7GyPJMDnbv/NtMCwgdUrngArsRhOBg4S+Y22QcITFrY6ojfIYtHqifYp3qQ4Jm9
+Hjn3ZQzIWzDB+4U1glVrUbrLLlsEOZePEDz1LHsHvyPhZI8mrvBkowK6ejYaWtQw
+cPiXl1Kmqx3Gy2bfxJ9z3R3z3LhjD5LNqjP621BpFMSpEewd+yvr5Egi+8cyxd0t
+/Gnd/+f65OQbf5r+7lLwlHzZgScxcFV3G+Bx3tShpnWvB3xWZMvGe98qL2Cc2/Lu
+SMkrFW3kVbVucxt8dBWwT7YiMR7W7crSTHzMvcQouqlWqSeiaxFrtRnkP3mC4Toq
+5/dCOWfMouT/EQ4P7WeFgbAo4ptObO+tn5GXTDURPvf41qO9e6kxzjQ8IHv9Glb1
+TjEMKl6JAhwEEAEKAAYFAlULakkACgkQMs9L9OcudNNBVw/9FlwI8AqB7EI4KFOS
+vVmQFEaj3q7Mqwz/HFATfiHbA39iWH3sX2J9iM5ote+rJ0ksUZnwZACWYpmIZyzD
+M7vxErKkp+CkaCBiNw81wYxGksm82+9PsjOyscmEHVr858nhhweQ/YTp4KlJcGek
+mY8RNyotsf5kKGdTgWiEyUPYH8e9cCEvLqZiwAhHVp2/uvxCU4KzqqOrhAuRpWCU
+2mwFMLxTdfe2auo6Hub5TQnj2IUV6GR7UaFEFKSfp8/0adMhGMcbn2d2hk1p8gU9
+k3Fwk7GD33tuej9vswGEP6Gw+BRscB+upaQgt/NOPcNu7lfYusqm0RcWqqke7mJA
+2H2W6BWlE7Tdz5ybi+z7RJSKGJ3KFTcApd759+gTfAYIXHroiJvf/RPiky6DuCHJ
+RsMYbJG4f7A+hk6XK/90CosLG+jASlLdMKQEHlg9A+FAVHq4q8zKXa0fwykse+Zw
+bPJNTjRYe1LaIdaQXxE7ZQC0lZ0Dsu9jOz6rshYwd3DdO3GZpxvsYr++flxFpalv
+V1t4mxeLi15sBu/ZzL2l515qJXQISgCU8sRABQ8Oj2kAyHyKSSdfHGB3gwqxB5za
+GCv7bFdku4Qr/y1apatteJvbk5S0sDvNB/yoyDqapwD/0z2Bg+gkaW+Q0PSdIGhJ
+imgrEmTxSUkcPqrMJouWHODtKK6IRgQQEQIABgUCVQtvFgAKCRBeQ8q5rsd+r8Xn
+AKCLEST9m2VaCw0uSDMEWPQNZJiSlgCfSRU3qkbIj+VX8khEwyMbllhhESyJARwE
+EAECAAYFAlULdGIACgkQIRoZ8HiRON+AywgAkIQ6/pG4YPdrMQST96JLrjLHTnm8
+uY5BemYsyJW36lgY7VRtReW2mexDG7SPEpeT4oTpBPCQw1NYiLsM3KwhO50Z0Wsq
+20fxYLMHk5xttOIkccOjXD14iB1/+u5cmxxWZ8JIdsrUb8EjWTeb3sAX+vX8hF//
+aYLMZ+J9gSos7/zotdGpBl2uON7QBQFnkG+ayLVaa1lzBfiaxIJCXTqOeLn/+0x7
+0AVfsYUYw+KT6QxpK09A6todHyAJ19CWwQxk2oWLDx9mYSCgmsGwAgU/QbMd5M/2
+0KXM+HV2IJypAPqkbTJBoMLajNPpbHs13B7DN4XZ8LHkPn7PNoijTXch97QwQW5k
+cmV3IFdhbmcgKENPREUgU0lHTklORyBLRVkpIDx3YW5nQGFwYWNoZS5vcmc+iQI3
+BBMBCgAhBQJVC2KNAhsDBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEAjVGgp1
+ARBc13AP/iB9/M+w8DTKN8dj9PNhfJ86wb9vbe6kVmr9JBtjOMrFbdNXt7l4+NVc
+tPqnwHijjS6lTf/K1se4mlBqgaO0uXl4m+LIXZub5r525L4ZB5vGxaqkS6/UFK0o
+JaHYxqRQxVfqS9j0uo3DtM1CCRzzaO3rqig1WHuGPhAwRha7DvKHeCEn54/n8SP2
+yND65gIaKNSBQXC4s5AyekHlCYH+lnLPEyXHcZSOKknpC4avuYGrawVPTIB7QCNS
+wwHZFHIIUiGwvk/c19eSzMUMF6XB7htb8FIT3yyC1wTzoDWK9pKwCV/E9tXFMJkt

hadoop git commit: HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes for infinite loop (Contributed by Vinayakumar B)

2015-04-20 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 cc42ad03d - 1c8d54bc0


HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor goes 
for infinite loop (Contributed by Vinayakumar B)

(cherry picked from commit 867d5d2675b8fb73c40fac1e581b02b005459d95)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c8d54bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c8d54bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c8d54bc

Branch: refs/heads/branch-2.7
Commit: 1c8d54bc04cf12a404a8d53e87eb198c16720ac8
Parents: cc42ad0
Author: Vinayakumar B vinayakum...@apache.org
Authored: Thu Apr 2 08:12:00 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Tue Apr 21 07:45:59 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt  | 3 +++
 .../hadoop/hdfs/server/datanode/ReportBadBlockAction.java| 8 ++--
 2 files changed, 9 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c8d54bc/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index b2d2aff..a169e87 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -35,6 +35,9 @@ Release 2.7.1 - UNRELEASED
 HDFS-8179. DFSClient#getServerDefaults returns null within 1
 hour of system start. (Xiaoyu Yao via Arpit Agarwal)
 
+HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
+goes for infinite loop (vinayakumarb)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c8d54bc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
index fd01a01..991b56d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ReportBadBlockAction.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import 
org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.ipc.StandbyException;
 
 /**
  * ReportBadBlockAction is an instruction issued by {{BPOfferService}} to
@@ -58,8 +59,11 @@ public class ReportBadBlockAction implements 
BPServiceActorAction {
 dnArr, uuids, types) };
 
 try {
-  bpNamenode.reportBadBlocks(locatedBlock);  
-} catch (IOException e){
+  bpNamenode.reportBadBlocks(locatedBlock);
+} catch (StandbyException e) {
+  DataNode.LOG.warn(Failed to report bad block  + block
+  +  to standby namenode);
+} catch (IOException e) {
   throw new BPServiceActorActionException(Failed to report bad block 
   + block +  to namenode: );
 }



hadoop git commit: Set the release date for 2.7.0

2015-04-20 Thread vinodkv
Repository: hadoop
Updated Branches:
  refs/heads/trunk ed4137ceb - d52de6154


Set the release date for 2.7.0


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d52de615
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d52de615
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d52de615

Branch: refs/heads/trunk
Commit: d52de61544060a04a273114f55979e7e929a576c
Parents: ed4137c
Author: Vinod Kumar Vavilapalli vino...@apache.org
Authored: Mon Apr 20 20:16:58 2015 -0700
Committer: Vinod Kumar Vavilapalli vino...@apache.org
Committed: Mon Apr 20 20:16:58 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES.txt | 2 +-
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 2 +-
 hadoop-mapreduce-project/CHANGES.txt| 2 +-
 hadoop-yarn-project/CHANGES.txt | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d52de615/hadoop-common-project/hadoop-common/CHANGES.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt 
b/hadoop-common-project/hadoop-common/CHANGES.txt
index a48baf8..230717c 100644
--- a/hadoop-common-project/hadoop-common/CHANGES.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES.txt
@@ -531,7 +531,7 @@ Release 2.7.1 - UNRELEASED
 
   BUG FIXES
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d52de615/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 2291855..1aa9ce4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -564,7 +564,7 @@ Release 2.7.1 - UNRELEASED
 HDFS-7916. 'reportBadBlocks' from datanodes to standby Node BPServiceActor
 goes for infinite loop (vinayakumarb)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d52de615/hadoop-mapreduce-project/CHANGES.txt
--
diff --git a/hadoop-mapreduce-project/CHANGES.txt 
b/hadoop-mapreduce-project/CHANGES.txt
index a02ae84..ffa01fa 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -351,7 +351,7 @@ Release 2.7.1 - UNRELEASED
 
 MAPREDUCE-6300. Task list sort by task id broken. (Siqi Li via aajisaka)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d52de615/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 3f1551f..b0d3bd9 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -250,7 +250,7 @@ Release 2.7.1 - UNRELEASED
 YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf
 without making a copy. (Jason Lowe via jianhe)
 
-Release 2.7.0 - UNRELEASED
+Release 2.7.0 - 2015-04-20
 
   INCOMPATIBLE CHANGES
 



hadoop git commit: HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to create (Contributed by surendra singh lilhore)

2015-04-20 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 5d3a4d51b - c562e1b19


HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to 
create (Contributed by surendra singh lilhore)

(cherry picked from commit c17cd4f7c163cf97d6e42865df25709f29d39e3d)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c562e1b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c562e1b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c562e1b1

Branch: refs/heads/branch-2
Commit: c562e1b19be435733d405f80f0f5b27e11dc2e7b
Parents: 5d3a4d5
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Apr 20 14:58:04 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Mon Apr 20 14:58:59 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java  | 10 ++
 2 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c562e1b1/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 7da514a..1acb906 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -208,6 +208,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8043. NPE in MiniDFSCluster teardown. (Brahma Reddy Battula via ozawa)
 
+HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to
+create (surendra singh lilhore via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c562e1b1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 723b322..a13a31f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1728,10 +1728,12 @@ public class DataNode extends ReconfigurableBase
 LOG.warn("Exception shutting down DataNode", e);
   }
 }
-try {
-  httpServer.close();
-} catch (Exception e) {
-  LOG.warn("Exception shutting down DataNode HttpServer", e);
+if (httpServer != null) {
+  try {
+httpServer.close();
+  } catch (Exception e) {
+LOG.warn("Exception shutting down DataNode HttpServer", e);
+  }
 }
 
 if (pauseMonitor != null) {



hadoop git commit: HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to create (Contributed by surendra singh lilhore)

2015-04-20 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5c97db07f -> c17cd4f7c


HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to 
create (Contributed by surendra singh lilhore)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c17cd4f7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c17cd4f7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c17cd4f7

Branch: refs/heads/trunk
Commit: c17cd4f7c163cf97d6e42865df25709f29d39e3d
Parents: 5c97db0
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Apr 20 14:58:04 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Mon Apr 20 14:58:04 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  3 +++
 .../org/apache/hadoop/hdfs/server/datanode/DataNode.java  | 10 ++
 2 files changed, 9 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17cd4f7/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 9bb1fd4..8dec32e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -526,6 +526,9 @@ Release 2.8.0 - UNRELEASED
 
 HDFS-8043. NPE in MiniDFSCluster teardown. (Brahma Reddy Battula via ozawa)
 
+HDFS-8173. NPE thrown at DataNode shutdown when HTTP server was not able to
+create (surendra singh lilhore via vinayakumarb)
+
 Release 2.7.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c17cd4f7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 0ddb99c..e81da52 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1721,10 +1721,12 @@ public class DataNode extends ReconfigurableBase
 LOG.warn("Exception shutting down DataNode", e);
   }
 }
-try {
-  httpServer.close();
-} catch (Exception e) {
-  LOG.warn("Exception shutting down DataNode HttpServer", e);
+if (httpServer != null) {
+  try {
+httpServer.close();
+  } catch (Exception e) {
+LOG.warn("Exception shutting down DataNode HttpServer", e);
+  }
 }
 
 if (pauseMonitor != null) {



[2/3] hadoop git commit: HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

2015-04-20 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
deleted file mode 100644
index 2dc1d04..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.*;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.*;
-
-/**
- * A Block is a Hadoop FS primitive, identified by a 
- * long.
- *
- **/
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class Block implements Writable, Comparable<Block> {
-  public static final String BLOCK_FILE_PREFIX = "blk_";
-  public static final String METADATA_EXTENSION = ".meta";
-  static {  // register a ctor
-WritableFactories.setFactory
-  (Block.class,
-   new WritableFactory() {
- @Override
- public Writable newInstance() { return new Block(); }
-   });
-  }
-
-  public static final Pattern blockFilePattern = Pattern
-  .compile(BLOCK_FILE_PREFIX + (-??\\d++)$);
-  public static final Pattern metaFilePattern = Pattern
-  .compile(BLOCK_FILE_PREFIX + (-??\\d++)_(\\d++)\\ + METADATA_EXTENSION
-  + $);
-  public static final Pattern metaOrBlockFilePattern = Pattern
-  .compile(BLOCK_FILE_PREFIX + (-??\\d++)(_(\\d++)\\ + METADATA_EXTENSION
-  + )?$);
-
-  public static boolean isBlockFilename(File f) {
-String name = f.getName();
-return blockFilePattern.matcher(name).matches();
-  }
-
-  public static long filename2id(String name) {
-Matcher m = blockFilePattern.matcher(name);
-return m.matches() ? Long.parseLong(m.group(1)) : 0;
-  }
-
-  public static boolean isMetaFilename(String name) {
-return metaFilePattern.matcher(name).matches();
-  }
-
-  public static File metaToBlockFile(File metaFile) {
-return new File(metaFile.getParent(), metaFile.getName().substring(
-0, metaFile.getName().lastIndexOf('_')));
-  }
-
-  /**
-   * Get generation stamp from the name of the metafile name
-   */
-  public static long getGenerationStamp(String metaFile) {
-Matcher m = metaFilePattern.matcher(metaFile);
-return m.matches() ? Long.parseLong(m.group(2))
-: HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP;
-  }
-
-  /**
-   * Get the blockId from the name of the meta or block file
-   */
-  public static long getBlockId(String metaOrBlockFile) {
-Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile);
-return m.matches() ? Long.parseLong(m.group(1)) : 0;
-  }
-
-  private long blockId;
-  private long numBytes;
-  private long generationStamp;
-
-  public Block() {this(0, 0, 0);}
-
-  public Block(final long blkid, final long len, final long generationStamp) {
-set(blkid, len, generationStamp);
-  }
-
-  public Block(final long blkid) {
-this(blkid, 0, HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP);
-  }
-
-  public Block(Block blk) {
-this(blk.blockId, blk.numBytes, blk.generationStamp);
-  }
-
-  /**
-   * Find the blockid from the given filename
-   */
-  public Block(File f, long len, long genstamp) {
-this(filename2id(f.getName()), len, genstamp);
-  }
-
-  public void set(long blkid, long len, long genStamp) {
-this.blockId = blkid;
-this.numBytes = len;
-this.generationStamp = genStamp;
-  }
-  /**
-   */
-  public long getBlockId() {
-return blockId;
-  }
-  
-  public void setBlockId(long bid) {
-blockId = bid;
-  }
-
-  /**
-   */
-  public String getBlockName() {
-return BLOCK_FILE_PREFIX + String.valueOf(blockId);
- 

hadoop git commit: HDFS-8181. createErasureCodingZone sets retryCache state as false always (Contributed by Uma Maheswara Rao G)

2015-04-20 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 c6114e64a -> bcf4c205e


HDFS-8181. createErasureCodingZone sets retryCache state as false always 
(Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcf4c205
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcf4c205
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcf4c205

Branch: refs/heads/HDFS-7285
Commit: bcf4c205e1a4764391b9d8fb8e16c3080b8b9537
Parents: c6114e6
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Apr 20 15:04:49 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Mon Apr 20 15:04:49 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  | 3 +++
 .../org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java | 1 +
 2 files changed, 4 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4c205/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 40517e7..87c6b1d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -90,3 +90,6 @@
 
 HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for
 making it ready for transfer to DN (Uma Maheswara Rao G via vinayakumarb)
+
+HDFS-8181. createErasureCodingZone sets retryCache state as false always
+(Uma Maheswara Rao G via vinayakumarb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcf4c205/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 06aee59..487705a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1847,6 +1847,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
 boolean success = false;
 try {
   namesystem.createErasureCodingZone(src, schema, cacheEntry != null);
+  success = true;
 } finally {
   RetryCache.setState(cacheEntry, success);
 }



hadoop git commit: HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits (Vinayakumar B) Updated again

2015-04-20 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 bcf4c205e -> b0fdfb054


HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits 
(Vinayakumar B)
Updated again


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0fdfb05
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0fdfb05
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0fdfb05

Branch: refs/heads/HDFS-7285
Commit: b0fdfb0548572dab229055813def7e76306564f8
Parents: bcf4c20
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Apr 20 15:12:18 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Mon Apr 20 15:12:18 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 11 +++
 1 file changed, 11 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0fdfb05/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 87c6b1d..c8dbf08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -83,11 +83,22 @@
 
 HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
 
+HDFS-8120. Erasure coding: created util class to analyze striped block 
groups.
+(Contributed by Zhe Zhang and Li Bo via Jing Zhao)
+
 HDFS-7994. Detect if resevered EC Block ID is already used during namenode
 startup. (Hui Zheng via szetszwo)
 
 HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).
 
+HDFS-8166. DFSStripedOutputStream should not create empty blocks. (Jing 
Zhao)
+
+HDFS-7937. Erasure Coding: INodeFile quota computation unit tests.
+(Kai Sasaki via Jing Zhao)
+
+HDFS-8145. Fix the editlog corruption exposed by failed 
TestAddStripedBlocks.
+(Jing Zhao)
+
 HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for
 making it ready for transfer to DN (Uma Maheswara Rao G via vinayakumarb)
 



[1/3] hadoop git commit: HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

2015-04-20 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/trunk 8511d8080 -> 5c97db07f


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c97db07/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 850b3bd..cea2b82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -140,7 +140,7 @@ class FSDirStatAndListingOp {
   }
 
   private static byte getStoragePolicyID(byte inodePolicy, byte parentPolicy) {
-return inodePolicy != BlockStoragePolicySuite.ID_UNSPECIFIED ? inodePolicy 
:
+return inodePolicy != 
HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ? inodePolicy :
 parentPolicy;
   }
 
@@ -176,8 +176,8 @@ class FSDirStatAndListingOp {
   if (targetNode == null)
 return null;
   byte parentStoragePolicy = isSuperUser ?
-  targetNode.getStoragePolicyID() : BlockStoragePolicySuite
-  .ID_UNSPECIFIED;
+  targetNode.getStoragePolicyID() : HdfsConstantsClient
+  .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
   if (!targetNode.isDirectory()) {
 return new DirectoryListing(
@@ -199,7 +199,7 @@ class FSDirStatAndListingOp {
 INode cur = contents.get(startChild+i);
 byte curPolicy = isSuperUser  !cur.isSymlink()?
 cur.getLocalStoragePolicyID():
-BlockStoragePolicySuite.ID_UNSPECIFIED;
+HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
 needLocation, getStoragePolicyID(curPolicy,
 parentStoragePolicy), snapshot, isRawPath, iip);
@@ -254,7 +254,7 @@ class FSDirStatAndListingOp {
 for (int i = 0; i  numOfListing; i++) {
   Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
   listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
-  BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
+  HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, 
Snapshot.CURRENT_STATE_ID,
   false, INodesInPath.fromINode(sRoot));
 }
 return new DirectoryListing(
@@ -277,7 +277,7 @@ class FSDirStatAndListingOp {
 try {
   final INode i = src.getLastINode();
   byte policyId = includeStoragePolicy  i != null  !i.isSymlink() ?
-  i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
+  i.getStoragePolicyID() : 
HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
   return i == null ? null : createFileStatus(
   fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
   src.getPathSnapshotId(), isRawPath, src);
@@ -295,7 +295,7 @@ class FSDirStatAndListingOp {
   if (fsd.getINode4DotSnapshot(srcs) != null) {
 return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
 HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-BlockStoragePolicySuite.ID_UNSPECIFIED);
+HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
   }
   return null;
 }
@@ -322,7 +322,7 @@ class FSDirStatAndListingOp {
 if (fsd.getINode4DotSnapshot(src) != null) {
   return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
   HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-  BlockStoragePolicySuite.ID_UNSPECIFIED);
+  HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
 }
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c97db07/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

[3/3] hadoop git commit: HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

2015-04-20 Thread wheat9
HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed 
by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c97db07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c97db07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c97db07

Branch: refs/heads/trunk
Commit: 5c97db07fb306842f49d73a67a90cecec19a7833
Parents: 8511d80
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 16 23:13:15 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Apr 20 00:36:46 2015 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   8 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  65 +++
 .../org/apache/hadoop/hdfs/protocol/Block.java  | 243 +
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 279 ++
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 512 ++
 .../hdfs/protocol/DatanodeInfoWithStorage.java  |  67 +++
 .../hadoop/hdfs/protocol/ExtendedBlock.java | 123 +
 .../hdfs/protocol/FsPermissionExtension.java|  89 
 .../hdfs/protocol/HdfsConstantsClient.java  |   1 +
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 271 ++
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 206 
 .../hadoop/hdfs/protocol/LocatedBlocks.java | 189 +++
 .../token/block/BlockTokenIdentifier.java   | 189 +++
 .../delegation/DelegationTokenIdentifier.java   | 100 
 .../hadoop/hdfs/web/WebHdfsConstants.java   |  37 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  15 -
 .../org/apache/hadoop/hdfs/protocol/Block.java  | 243 -
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 279 --
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 514 ---
 .../hdfs/protocol/DatanodeInfoWithStorage.java  |  67 ---
 .../hadoop/hdfs/protocol/ExtendedBlock.java | 123 -
 .../hdfs/protocol/FsPermissionExtension.java|  89 
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 271 --
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 217 
 .../hadoop/hdfs/protocol/LocatedBlocks.java | 189 ---
 .../protocol/SnapshottableDirectoryStatus.java  |   3 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   3 +-
 .../token/block/BlockTokenIdentifier.java   | 189 ---
 .../delegation/DelegationTokenIdentifier.java   | 101 
 .../server/blockmanagement/BlockManager.java|  19 +-
 .../BlockStoragePolicySuite.java|   1 -
 .../blockmanagement/DatanodeStorageInfo.java|   3 +
 .../blockmanagement/HeartbeatManager.java   |  11 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   3 +-
 .../server/namenode/FSDirStatAndListingOp.java  |  18 +-
 .../hdfs/server/namenode/FSDirectory.java   |   4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   7 +-
 .../hdfs/server/namenode/FSEditLogOp.java   |   5 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   5 +-
 .../hadoop/hdfs/server/namenode/INode.java  |  13 +-
 .../hdfs/server/namenode/INodeDirectory.java|   9 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   8 +-
 .../hadoop/hdfs/server/namenode/INodeMap.java   |   5 +-
 .../snapshot/FileWithSnapshotFeature.java   |   3 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |   6 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   4 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |   3 +-
 .../hadoop/hdfs/web/SWebHdfsFileSystem.java |   3 +-
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  10 +-
 .../hadoop/hdfs/web/WebHdfsConstants.java   |  30 --
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |   5 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java |   8 +-
 .../namenode/TestNamenodeCapacityReport.java|  19 +-
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  |   2 +-
 55 files changed, 2476 insertions(+), 2412 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c97db07/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 43bc332..478a931 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -1,2 +1,10 @@
 FindBugsFilter
+  Match
+Or
+  Class name=org.apache.hadoop.hdfs.protocol.HdfsFileStatus/
+  Class name=org.apache.hadoop.hdfs.protocol.LocatedBlock/
+  Class 
name=org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier/
+/Or
+Bug 

[3/3] hadoop git commit: HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

2015-04-20 Thread wheat9
HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed 
by Haohui Mai.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5d3a4d51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5d3a4d51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5d3a4d51

Branch: refs/heads/branch-2
Commit: 5d3a4d51bde401f2073aeaa3daf15ca2d75b3449
Parents: 490fac3
Author: Haohui Mai whe...@apache.org
Authored: Thu Apr 16 23:13:15 2015 -0700
Committer: Haohui Mai whe...@apache.org
Committed: Mon Apr 20 00:36:19 2015 -0700

--
 .../dev-support/findbugsExcludeFile.xml |   8 +
 .../org/apache/hadoop/hdfs/DFSUtilClient.java   |  65 +++
 .../org/apache/hadoop/hdfs/protocol/Block.java  | 243 +
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 279 ++
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 510 ++
 .../hdfs/protocol/DatanodeInfoWithStorage.java  |  67 +++
 .../hadoop/hdfs/protocol/ExtendedBlock.java | 123 +
 .../hdfs/protocol/FsPermissionExtension.java|  89 
 .../hdfs/protocol/HdfsConstantsClient.java  |   1 +
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 271 ++
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 206 
 .../hadoop/hdfs/protocol/LocatedBlocks.java | 189 +++
 .../token/block/BlockTokenIdentifier.java   | 189 +++
 .../delegation/DelegationTokenIdentifier.java   | 101 
 .../hadoop/hdfs/web/WebHdfsConstants.java   |  37 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |   2 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  15 -
 .../org/apache/hadoop/hdfs/protocol/Block.java  | 243 -
 .../apache/hadoop/hdfs/protocol/DatanodeID.java | 279 --
 .../hadoop/hdfs/protocol/DatanodeInfo.java  | 512 ---
 .../hdfs/protocol/DatanodeInfoWithStorage.java  |  67 ---
 .../hadoop/hdfs/protocol/ExtendedBlock.java | 123 -
 .../hdfs/protocol/FsPermissionExtension.java|  89 
 .../hadoop/hdfs/protocol/HdfsFileStatus.java| 271 --
 .../hadoop/hdfs/protocol/LocatedBlock.java  | 217 
 .../hadoop/hdfs/protocol/LocatedBlocks.java | 189 ---
 .../protocol/SnapshottableDirectoryStatus.java  |   3 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   3 +-
 .../token/block/BlockTokenIdentifier.java   | 189 ---
 .../delegation/DelegationTokenIdentifier.java   | 102 
 .../server/blockmanagement/BlockManager.java|  19 +-
 .../BlockStoragePolicySuite.java|   1 -
 .../blockmanagement/DatanodeStorageInfo.java|   3 +
 .../blockmanagement/HeartbeatManager.java   |  11 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   3 +-
 .../server/namenode/FSDirStatAndListingOp.java  |  18 +-
 .../hdfs/server/namenode/FSDirectory.java   |   4 +-
 .../hdfs/server/namenode/FSEditLogLoader.java   |   7 +-
 .../hdfs/server/namenode/FSEditLogOp.java   |   5 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   5 +-
 .../hadoop/hdfs/server/namenode/INode.java  |  13 +-
 .../hdfs/server/namenode/INodeDirectory.java|   9 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   8 +-
 .../hadoop/hdfs/server/namenode/INodeMap.java   |   5 +-
 .../snapshot/FileWithSnapshotFeature.java   |   3 +-
 .../web/resources/NamenodeWebHdfsMethods.java   |   6 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   4 +-
 .../apache/hadoop/hdfs/web/JsonUtilClient.java  |   3 +-
 .../hadoop/hdfs/web/SWebHdfsFileSystem.java |   3 +-
 .../org/apache/hadoop/hdfs/web/TokenAspect.java |  10 +-
 .../hadoop/hdfs/web/WebHdfsConstants.java   |  30 --
 .../hadoop/hdfs/web/WebHdfsFileSystem.java  |   5 +-
 .../hadoop/hdfs/TestBlockStoragePolicy.java |   8 +-
 .../namenode/TestNamenodeCapacityReport.java|  19 +-
 .../apache/hadoop/hdfs/web/TestWebHdfsUrl.java  |   2 +-
 55 files changed, 2475 insertions(+), 2411 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml 
b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
index 43bc332..478a931 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/dev-support/findbugsExcludeFile.xml
@@ -1,2 +1,10 @@
 FindBugsFilter
+  Match
+Or
+  Class name=org.apache.hadoop.hdfs.protocol.HdfsFileStatus/
+  Class name=org.apache.hadoop.hdfs.protocol.LocatedBlock/
+  Class 
name=org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier/
+/Or
+Bug 

[2/3] hadoop git commit: HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

2015-04-20 Thread wheat9
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c97db07/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
deleted file mode 100644
index 2dc1d04..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
+++ /dev/null
@@ -1,243 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * License); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an AS IS BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.protocol;
-
-import java.io.*;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.*;
-
-/**
- * A Block is a Hadoop FS primitive, identified by a 
- * long.
- *
- **/
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class Block implements Writable, Comparable<Block> {
-  public static final String BLOCK_FILE_PREFIX = "blk_";
-  public static final String METADATA_EXTENSION = ".meta";
-  static {  // register a ctor
-WritableFactories.setFactory
-  (Block.class,
-   new WritableFactory() {
- @Override
- public Writable newInstance() { return new Block(); }
-   });
-  }
-
-  public static final Pattern blockFilePattern = Pattern
-  .compile(BLOCK_FILE_PREFIX + (-??\\d++)$);
-  public static final Pattern metaFilePattern = Pattern
-  .compile(BLOCK_FILE_PREFIX + (-??\\d++)_(\\d++)\\ + METADATA_EXTENSION
-  + $);
-  public static final Pattern metaOrBlockFilePattern = Pattern
-  .compile(BLOCK_FILE_PREFIX + (-??\\d++)(_(\\d++)\\ + METADATA_EXTENSION
-  + )?$);
-
-  public static boolean isBlockFilename(File f) {
-String name = f.getName();
-return blockFilePattern.matcher(name).matches();
-  }
-
-  public static long filename2id(String name) {
-Matcher m = blockFilePattern.matcher(name);
-return m.matches() ? Long.parseLong(m.group(1)) : 0;
-  }
-
-  public static boolean isMetaFilename(String name) {
-return metaFilePattern.matcher(name).matches();
-  }
-
-  public static File metaToBlockFile(File metaFile) {
-return new File(metaFile.getParent(), metaFile.getName().substring(
-0, metaFile.getName().lastIndexOf('_')));
-  }
-
-  /**
-   * Get generation stamp from the name of the metafile name
-   */
-  public static long getGenerationStamp(String metaFile) {
-Matcher m = metaFilePattern.matcher(metaFile);
-return m.matches() ? Long.parseLong(m.group(2))
-: HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP;
-  }
-
-  /**
-   * Get the blockId from the name of the meta or block file
-   */
-  public static long getBlockId(String metaOrBlockFile) {
-Matcher m = metaOrBlockFilePattern.matcher(metaOrBlockFile);
-return m.matches() ? Long.parseLong(m.group(1)) : 0;
-  }
-
-  private long blockId;
-  private long numBytes;
-  private long generationStamp;
-
-  public Block() {this(0, 0, 0);}
-
-  public Block(final long blkid, final long len, final long generationStamp) {
-set(blkid, len, generationStamp);
-  }
-
-  public Block(final long blkid) {
-this(blkid, 0, HdfsConstantsClient.GRANDFATHER_GENERATION_STAMP);
-  }
-
-  public Block(Block blk) {
-this(blk.blockId, blk.numBytes, blk.generationStamp);
-  }
-
-  /**
-   * Find the blockid from the given filename
-   */
-  public Block(File f, long len, long genstamp) {
-this(filename2id(f.getName()), len, genstamp);
-  }
-
-  public void set(long blkid, long len, long genStamp) {
-this.blockId = blkid;
-this.numBytes = len;
-this.generationStamp = genStamp;
-  }
-  /**
-   */
-  public long getBlockId() {
-return blockId;
-  }
-  
-  public void setBlockId(long bid) {
-blockId = bid;
-  }
-
-  /**
-   */
-  public String getBlockName() {
-return BLOCK_FILE_PREFIX + String.valueOf(blockId);
- 

[1/3] hadoop git commit: HDFS-8169. Move LocatedBlocks and related classes to hdfs-client. Contributed by Haohui Mai.

2015-04-20 Thread wheat9
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 490fac391 -> 5d3a4d51b


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
index 850b3bd..cea2b82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirStatAndListingOp.java
@@ -31,12 +31,12 @@ import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.FsPermissionExtension;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstantsClient;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.SnapshotException;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockStoragePolicySuite;
 import 
org.apache.hadoop.hdfs.server.namenode.snapshot.DirectorySnapshottableFeature;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
@@ -140,7 +140,7 @@ class FSDirStatAndListingOp {
   }
 
   private static byte getStoragePolicyID(byte inodePolicy, byte parentPolicy) {
-return inodePolicy != BlockStoragePolicySuite.ID_UNSPECIFIED ? inodePolicy 
:
+return inodePolicy != 
HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED ? inodePolicy :
 parentPolicy;
   }
 
@@ -176,8 +176,8 @@ class FSDirStatAndListingOp {
   if (targetNode == null)
 return null;
   byte parentStoragePolicy = isSuperUser ?
-  targetNode.getStoragePolicyID() : BlockStoragePolicySuite
-  .ID_UNSPECIFIED;
+  targetNode.getStoragePolicyID() : HdfsConstantsClient
+  .BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 
   if (!targetNode.isDirectory()) {
 return new DirectoryListing(
@@ -199,7 +199,7 @@ class FSDirStatAndListingOp {
 INode cur = contents.get(startChild+i);
byte curPolicy = isSuperUser && !cur.isSymlink()?
 cur.getLocalStoragePolicyID():
-BlockStoragePolicySuite.ID_UNSPECIFIED;
+HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
 listing[i] = createFileStatus(fsd, src, cur.getLocalNameBytes(), cur,
 needLocation, getStoragePolicyID(curPolicy,
 parentStoragePolicy), snapshot, isRawPath, iip);
@@ -254,7 +254,7 @@ class FSDirStatAndListingOp {
for (int i = 0; i < numOfListing; i++) {
   Snapshot.Root sRoot = snapshots.get(i + skipSize).getRoot();
   listing[i] = createFileStatus(fsd, src, sRoot.getLocalNameBytes(), sRoot,
-  BlockStoragePolicySuite.ID_UNSPECIFIED, Snapshot.CURRENT_STATE_ID,
+  HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED, 
Snapshot.CURRENT_STATE_ID,
   false, INodesInPath.fromINode(sRoot));
 }
 return new DirectoryListing(
@@ -277,7 +277,7 @@ class FSDirStatAndListingOp {
 try {
   final INode i = src.getLastINode();
  byte policyId = includeStoragePolicy && i != null && !i.isSymlink() ?
-  i.getStoragePolicyID() : BlockStoragePolicySuite.ID_UNSPECIFIED;
+  i.getStoragePolicyID() : 
HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED;
   return i == null ? null : createFileStatus(
   fsd, path, HdfsFileStatus.EMPTY_NAME, i, policyId,
   src.getPathSnapshotId(), isRawPath, src);
@@ -295,7 +295,7 @@ class FSDirStatAndListingOp {
   if (fsd.getINode4DotSnapshot(srcs) != null) {
 return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
 HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-BlockStoragePolicySuite.ID_UNSPECIFIED);
+HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
   }
   return null;
 }
@@ -322,7 +322,7 @@ class FSDirStatAndListingOp {
 if (fsd.getINode4DotSnapshot(src) != null) {
   return new HdfsFileStatus(0, true, 0, 0, 0, 0, null, null, null, null,
   HdfsFileStatus.EMPTY_NAME, -1L, 0, null,
-  BlockStoragePolicySuite.ID_UNSPECIFIED);
+  HdfsConstantsClient.BLOCK_STORAGE_POLICY_ID_UNSPECIFIED);
 }
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5d3a4d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

hadoop git commit: YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf without making a copy. Contributed by Jason Lowe

2015-04-20 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk c17cd4f7c -> f967fd2f2


YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf without 
making a copy. Contributed by Jason Lowe


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f967fd2f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f967fd2f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f967fd2f

Branch: refs/heads/trunk
Commit: f967fd2f21791c5c4a5a090cc14ee88d155d2e2b
Parents: c17cd4f
Author: Jian He jia...@apache.org
Authored: Mon Apr 20 10:38:27 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Mon Apr 20 10:38:27 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/client/api/impl/ContainerManagementProtocolProxy.java   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f967fd2f/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 71fde68..fa3061a 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -244,6 +244,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3487. CapacityScheduler scheduler lock obtained unnecessarily when 
 calling getQueue (Jason Lowe via wangda)
 
+YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf
+without making a copy. (Jason Lowe via jianhe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f967fd2f/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
index eaf048d..94ebf0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
@@ -67,7 +67,7 @@ public class ContainerManagementProtocolProxy {
 
   public ContainerManagementProtocolProxy(Configuration conf,
   NMTokenCache nmTokenCache) {
-this.conf = conf;
+this.conf = new Configuration(conf);
 this.nmTokenCache = nmTokenCache;
 
 maxConnectedNMs =
@@ -88,7 +88,7 @@ public class ContainerManagementProtocolProxy {
   cmProxy = Collections.emptyMap();
   // Connections are not being cached so ensure connections close quickly
   // to avoid creating thousands of RPC client threads on large clusters.
-  conf.setInt(
+  this.conf.setInt(
   CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
   0);
 }



hadoop git commit: YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf without making a copy. Contributed by Jason Lowe (cherry picked from commit f967fd2f21791c5c4a5a090cc14ee88d155

2015-04-20 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 c562e1b19 -> 6b454aacc


YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf without 
making a copy. Contributed by Jason Lowe
(cherry picked from commit f967fd2f21791c5c4a5a090cc14ee88d155d2e2b)

Conflicts:
hadoop-yarn-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6b454aac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6b454aac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6b454aac

Branch: refs/heads/branch-2
Commit: 6b454aacc34acc9fa97e94a381e76f9b41519852
Parents: c562e1b
Author: Jian He jia...@apache.org
Authored: Mon Apr 20 10:38:27 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Mon Apr 20 10:40:09 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/client/api/impl/ContainerManagementProtocolProxy.java   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b454aac/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index a30d517..ef449a5 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -199,6 +199,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3462. Patches applied for YARN-2424 are inconsistent between
 trunk and branch-2. (Naganarasimha G R via harsh)
 
+YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf
+without making a copy. (Jason Lowe via jianhe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6b454aac/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
index eaf048d..94ebf0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
@@ -67,7 +67,7 @@ public class ContainerManagementProtocolProxy {
 
   public ContainerManagementProtocolProxy(Configuration conf,
   NMTokenCache nmTokenCache) {
-this.conf = conf;
+this.conf = new Configuration(conf);
 this.nmTokenCache = nmTokenCache;
 
 maxConnectedNMs =
@@ -88,7 +88,7 @@ public class ContainerManagementProtocolProxy {
   cmProxy = Collections.emptyMap();
   // Connections are not being cached so ensure connections close quickly
   // to avoid creating thousands of RPC client threads on large clusters.
-  conf.setInt(
+  this.conf.setInt(
   CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
   0);
 }



hadoop git commit: YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf without making a copy. Contributed by Jason Lowe (cherry picked from commit f967fd2f21791c5c4a5a090cc14ee88d155

2015-04-20 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.7 d6105d944 -> b493ec28a


YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf without 
making a copy. Contributed by Jason Lowe
(cherry picked from commit f967fd2f21791c5c4a5a090cc14ee88d155d2e2b)

Conflicts:
hadoop-yarn-project/CHANGES.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b493ec28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b493ec28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b493ec28

Branch: refs/heads/branch-2.7
Commit: b493ec28a8b17db05a4aee5f850d116b397008a0
Parents: d6105d9
Author: Jian He jia...@apache.org
Authored: Mon Apr 20 10:38:27 2015 -0700
Committer: Jian He jia...@apache.org
Committed: Mon Apr 20 10:41:17 2015 -0700

--
 hadoop-yarn-project/CHANGES.txt  | 3 +++
 .../yarn/client/api/impl/ContainerManagementProtocolProxy.java   | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b493ec28/hadoop-yarn-project/CHANGES.txt
--
diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 1b74021..ea58a31 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -17,6 +17,9 @@ Release 2.7.1 - UNRELEASED
 YARN-3462. Patches applied for YARN-2424 are inconsistent between
 trunk and branch-2. (Naganarasimha G R via harsh)
 
+YARN-3497. ContainerManagementProtocolProxy modifies IPC timeout conf
+without making a copy. (Jason Lowe via jianhe)
+
 Release 2.7.0 - UNRELEASED
 
   INCOMPATIBLE CHANGES

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b493ec28/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
index eaf048d..94ebf0d 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/ContainerManagementProtocolProxy.java
@@ -67,7 +67,7 @@ public class ContainerManagementProtocolProxy {
 
   public ContainerManagementProtocolProxy(Configuration conf,
   NMTokenCache nmTokenCache) {
-this.conf = conf;
+this.conf = new Configuration(conf);
 this.nmTokenCache = nmTokenCache;
 
 maxConnectedNMs =
@@ -88,7 +88,7 @@ public class ContainerManagementProtocolProxy {
   cmProxy = Collections.emptyMap();
   // Connections are not being cached so ensure connections close quickly
   // to avoid creating thousands of RPC client threads on large clusters.
-  conf.setInt(
+  this.conf.setInt(
   CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY,
   0);
 }



[02/50] hadoop git commit: HDFS-7864. Erasure Coding: Update safemode calculation for striped blocks. Contributed by GAO Rui.

2015-04-20 Thread zhz
HDFS-7864. Erasure Coding: Update safemode calculation for striped blocks. 
Contributed by GAO Rui.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/534bb0c9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/534bb0c9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/534bb0c9

Branch: refs/heads/HDFS-7285
Commit: 534bb0c984f60291d75d12b1216a92ed5dcf7abd
Parents: d800f21
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 23 15:06:53 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:03 2015 -0700

--
 .../server/blockmanagement/BlockIdManager.java |  6 ++
 .../hdfs/server/blockmanagement/BlockManager.java  | 12 +++-
 .../hdfs/server/blockmanagement/BlocksMap.java |  2 +-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java  | 17 -
 .../hadoop/hdfs/server/namenode/SafeMode.java  |  5 +++--
 .../java/org/apache/hadoop/hdfs/TestSafeMode.java  | 15 +--
 6 files changed, 42 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/534bb0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
index d9c8422..9d3e3d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockIdManager.java
@@ -234,6 +234,12 @@ public class BlockIdManager {
 return id < 0;
   }
 
+  /**
+   * The last 4 bits of HdfsConstants.BLOCK_GROUP_INDEX_MASK(15) is 1111,
+   * so the last 4 bits of (~HdfsConstants.BLOCK_GROUP_INDEX_MASK) is 0000
+   * and the other 60 bits are 1. Group ID is the first 60 bits of any
+   * data/parity block id in the same striped block group.
+   */
   public static long convertToStripedID(long id) {
 return id  (~HdfsConstants.BLOCK_GROUP_INDEX_MASK);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/534bb0c9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0bfe0cd..6731524 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -686,8 +686,10 @@ public class BlockManager {
 // a forced completion when a file is getting closed by an
 // OP_CLOSE edit on the standby).
 namesystem.adjustSafeModeBlockTotals(0, 1);
+final int minStorage = curBlock.isStriped() ?
+((BlockInfoStriped) curBlock).getDataBlockNum() : minReplication;
 namesystem.incrementSafeBlockCount(
-Math.min(numNodes, minReplication));
+Math.min(numNodes, minStorage), curBlock);
 
 // replace block in the blocksMap
 return blocksMap.replaceBlock(completeBlock);
@@ -2217,7 +2219,7 @@ public class BlockManager {
 // refer HDFS-5283
 if (namesystem.isInSnapshot(storedBlock.getBlockCollection())) {
   int numOfReplicas = BlockInfo.getNumExpectedLocations(storedBlock);
-  namesystem.incrementSafeBlockCount(numOfReplicas);
+  namesystem.incrementSafeBlockCount(numOfReplicas, storedBlock);
 }
 //and fall through to next clause
   }  
@@ -2598,14 +2600,14 @@ public class BlockManager {
   // only complete blocks are counted towards that.
   // In the case that the block just became complete above, completeBlock()
   // handles the safe block count maintenance.
-  namesystem.incrementSafeBlockCount(numCurrentReplica);
+  namesystem.incrementSafeBlockCount(numCurrentReplica, storedBlock);
 }
   }
 
   /**
* Modify (block--datanode) map. Remove block from set of
* needed replications if this takes care of the problem.
-   * @return the block that is stored in blockMap.
+   * @return the block that is stored in blocksMap.
*/
   private Block addStoredBlock(final BlockInfo block,
final Block reportedBlock,
@@ -2674,7 +2676,7 @@ public class BlockManager {

[05/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, HDFS-7435 and HDFS-7930 (this commit is for HDFS-7930 only)

2015-04-20 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, 
HDFS-7435 and HDFS-7930 (this commit is for HDFS-7930 only)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d800f21d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d800f21d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d800f21d

Branch: refs/heads/HDFS-7285
Commit: d800f21d903b3162898b7615e929b41dc35bc0e3
Parents: 916a1ea
Author: Zhe Zhang z...@apache.org
Authored: Mon Mar 23 11:25:40 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:03 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java  | 7 ---
 .../org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java  | 7 ---
 .../org/apache/hadoop/hdfs/server/namenode/INodeFile.java | 2 +-
 3 files changed, 9 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d800f21d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f7d1bf9..0bfe0cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2123,17 +2123,18 @@ public class BlockManager {
* Mark block replicas as corrupt except those on the storages in 
* newStorages list.
*/
-  public void markBlockReplicasAsCorrupt(BlockInfoContiguous block, 
+  public void markBlockReplicasAsCorrupt(Block oldBlock,
+  BlockInfo block,
   long oldGenerationStamp, long oldNumBytes, 
   DatanodeStorageInfo[] newStorages) throws IOException {
 assert namesystem.hasWriteLock();
 BlockToMarkCorrupt b = null;
 if (block.getGenerationStamp() != oldGenerationStamp) {
-  b = new BlockToMarkCorrupt(block, oldGenerationStamp,
+  b = new BlockToMarkCorrupt(oldBlock, block, oldGenerationStamp,
   genstamp does not match  + oldGenerationStamp
   +  :  + block.getGenerationStamp(), Reason.GENSTAMP_MISMATCH);
 } else if (block.getNumBytes() != oldNumBytes) {
-  b = new BlockToMarkCorrupt(block,
+  b = new BlockToMarkCorrupt(oldBlock, block,
   length does not match  + oldNumBytes
   +  :  + block.getNumBytes(), Reason.SIZE_MISMATCH);
 } else {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d800f21d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 8b64b04..60191e2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -2797,7 +2797,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   /** Compute quota change for converting a complete block to a UC block */
   private QuotaCounts computeQuotaDeltaForUCBlock(INodeFile file) {
 final QuotaCounts delta = new QuotaCounts.Builder().build();
-final BlockInfoContiguous lastBlock = file.getLastBlock();
+final BlockInfo lastBlock = file.getLastBlock();
 if (lastBlock != null) {
   final long diff = file.getPreferredBlockSize() - lastBlock.getNumBytes();
   final short repl = file.getBlockReplication();
@@ -4387,8 +4387,9 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 } else {
   iFile.convertLastBlockToUC(storedBlock, trimmedStorageInfos);
   if (closeFile) {
-blockManager.markBlockReplicasAsCorrupt(storedBlock,
-oldGenerationStamp, oldNumBytes, trimmedStorageInfos);
+blockManager.markBlockReplicasAsCorrupt(oldBlock.getLocalBlock(),
+storedBlock, oldGenerationStamp, oldNumBytes,
+trimmedStorageInfos);
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d800f21d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
--
diff --git 

[14/50] hadoop git commit: HDFS-7617. Add unit tests for editlog transactions for EC. Contributed by Hui Zheng.

2015-04-20 Thread zhz
HDFS-7617. Add unit tests for editlog transactions for EC. Contributed by Hui 
Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/edd1bc68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/edd1bc68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/edd1bc68

Branch: refs/heads/HDFS-7285
Commit: edd1bc6866215735d80e5efbdbf8225bf30e0add
Parents: 646f463
Author: Zhe Zhang z...@apache.org
Authored: Tue Mar 31 10:46:04 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:06 2015 -0700

--
 .../server/namenode/TestFSEditLogLoader.java| 157 +++
 1 file changed, 157 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/edd1bc68/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
index 833ef95..d3cb749 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSEditLogLoader.java
@@ -39,14 +39,18 @@ import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import 
org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.log4j.Level;
@@ -414,4 +418,157 @@ public class TestFSEditLogLoader {
   fromByte(code), FSEditLogOpCodes.fromByte(code));
 }
   }
+
+  @Test
+  public void testAddNewStripedBlock() throws IOException{
+// start a cluster
+Configuration conf = new HdfsConfiguration();
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(9)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  FSNamesystem fns = cluster.getNamesystem();
+
+  String testDir = /ec;
+  String testFile = testfile_001;
+  String testFilePath = testDir + / + testFile;
+  String clientName = testUser1;
+  String clientMachine = testMachine1;
+  long blkId = 1;
+  long blkNumBytes = 1024;
+  long timestamp = 1426222918;
+  short blockNum = HdfsConstants.NUM_DATA_BLOCKS;
+  short parityNum = HdfsConstants.NUM_PARITY_BLOCKS;
+
+  //set the storage policy of the directory
+  fs.mkdir(new Path(testDir), new FsPermission(755));
+  fs.setStoragePolicy(new Path(testDir),
+  HdfsConstants.EC_STORAGE_POLICY_NAME);
+
+  // Create a file with striped block
+  Path p = new Path(testFilePath);
+  DFSTestUtil.createFile(fs, p, 0, (short) 1, 1);
+
+  fns.enterSafeMode(false);
+  fns.saveNamespace(0, 0);
+  fns.leaveSafeMode();
+
+  // Add a striped block to the file
+  BlockInfoStriped stripedBlk = new BlockInfoStriped(
+  new Block(blkId, blkNumBytes, timestamp), blockNum, parityNum);
+  INodeFile file = (INodeFile)fns.getFSDirectory().getINode(testFilePath);
+  file.toUnderConstruction(clientName, clientMachine);
+  file.getStripedBlocksFeature().addBlock(stripedBlk);
+  fns.getEditLog().logAddBlock(testFilePath, file);
+  file.toCompleteFile(System.currentTimeMillis());
+
+  //If the block by loaded is the same as above it means that
+  //we have successfully applied the edit log to the fsimage.
+  cluster.restartNameNodes();
+  cluster.waitActive();
+  fns = cluster.getNamesystem();
+
+  INodeFile inodeLoaded = (INodeFile)fns.getFSDirectory()
+  .getINode(testFilePath);
+
+  assertTrue(inodeLoaded.isWithStripedBlocks());
+
+  BlockInfoStriped[] blks = (BlockInfoStriped[])inodeLoaded.getBlocks();
+  assertEquals(1, 

[10/50] hadoop git commit: HDFS-7907. Erasure Coding: track invalid, corrupt, and under-recovery striped blocks in NameNode. Contributed by Jing Zhao.

2015-04-20 Thread zhz
HDFS-7907. Erasure Coding: track invalid, corrupt, and under-recovery striped 
blocks in NameNode. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ba93dd78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ba93dd78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ba93dd78

Branch: refs/heads/HDFS-7285
Commit: ba93dd780586940e2962a93f4265cbde9359ee85
Parents: 6a2a5e7
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 30 11:25:09 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:05 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  25 ++-
 .../server/blockmanagement/BlockManager.java| 203 ++-
 .../blockmanagement/DecommissionManager.java|  85 
 .../hdfs/server/namenode/FSNamesystem.java  |   8 +-
 .../server/blockmanagement/TestNodeCount.java   |   2 +-
 .../TestOverReplicatedBlocks.java   |   4 +-
 6 files changed, 172 insertions(+), 155 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba93dd78/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 30b5ee7..4a85efb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -18,11 +18,13 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+
 import java.io.DataOutput;
 import java.io.IOException;
 
+import static 
org.apache.hadoop.hdfs.protocol.HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE;
+
 /**
  * Subclass of {@link BlockInfo}, presenting a block group in erasure coding.
  *
@@ -37,7 +39,6 @@ import java.io.IOException;
  * array to record the block index for each triplet.
  */
 public class BlockInfoStriped extends BlockInfo {
-  private final int   chunkSize = HdfsConstants.BLOCK_STRIPED_CHUNK_SIZE;
   private final short dataBlockNum;
   private final short parityBlockNum;
   /**
@@ -132,6 +133,22 @@ public class BlockInfoStriped extends BlockInfo {
 return i == -1 ? -1 : indices[i];
   }
 
+  /**
+   * Identify the block stored in the given datanode storage. Note that
+   * the returned block has the same block Id with the one seen/reported by the
+   * DataNode.
+   */
+  Block getBlockOnStorage(DatanodeStorageInfo storage) {
+int index = getStorageBlockIndex(storage);
+if (index < 0) {
+  return null;
+} else {
+  Block block = new Block(this);
+  block.setBlockId(this.getBlockId() + index);
+  return block;
+}
+  }
+
   @Override
   boolean removeStorage(DatanodeStorageInfo storage) {
 int dnIndex = findStorageInfoFromEnd(storage);
@@ -186,8 +203,8 @@ public class BlockInfoStriped extends BlockInfo {
 // In case striped blocks, total usage by this striped blocks should
 // be the total of data blocks and parity blocks because
 // `getNumBytes` is the total of actual data block size.
-return ((getNumBytes() - 1) / (dataBlockNum * chunkSize) + 1)
-* chunkSize * parityBlockNum + getNumBytes();
+return ((getNumBytes() - 1) / (dataBlockNum * BLOCK_STRIPED_CHUNK_SIZE) + 
1)
+* BLOCK_STRIPED_CHUNK_SIZE * parityBlockNum + getNumBytes();
   }
 
   @Override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ba93dd78/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0ac7b64..0af2ce9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -178,7 +178,11 @@ public class BlockManager {
   /** Store blocks - datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
 
-  

[01/50] hadoop git commit: HDFS-7369. Erasure coding: distribute recovery work for striped blocks to DataNode. Contributed by Zhe Zhang.

2015-04-20 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 b0fdfb054 - bf2f940da (forced update)


HDFS-7369. Erasure coding: distribute recovery work for striped blocks to 
DataNode. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b4191306
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b4191306
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b4191306

Branch: refs/heads/HDFS-7285
Commit: b419130665650ce146d12ec967522a93adf3b6c6
Parents: 35a5a2b
Author: Zhe Zhang z...@apache.org
Authored: Wed Mar 18 15:52:36 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:02 2015 -0700

--
 .../server/blockmanagement/BlockCollection.java |   5 +
 .../server/blockmanagement/BlockManager.java| 290 +--
 .../blockmanagement/DatanodeDescriptor.java |  72 -
 .../server/blockmanagement/DatanodeManager.java |  20 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   9 +-
 .../server/protocol/BlockECRecoveryCommand.java |  63 
 .../hdfs/server/protocol/DatanodeProtocol.java  |   1 +
 .../blockmanagement/BlockManagerTestUtil.java   |   2 +-
 .../blockmanagement/TestBlockManager.java   |  22 +-
 .../TestRecoverStripedBlocks.java   | 107 +++
 10 files changed, 484 insertions(+), 107 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4191306/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
index 440a081..50dd17b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockCollection.java
@@ -86,4 +86,9 @@ public interface BlockCollection {
* @return whether the block collection is under construction.
*/
   public boolean isUnderConstruction();
+
+  /**
+   * @return whether the block collection is in striping format
+   */
+  public boolean isStriped();
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b4191306/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 26468e7..f7d1bf9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -42,6 +42,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -533,9 +534,9 @@ public class BlockManager {
 
 NumberReplicas numReplicas = new NumberReplicas();
 // source node returned is not used
-chooseSourceDatanode(block, containingNodes,
+chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-UnderReplicatedBlocks.LEVEL);
+new LinkedList<Short>(), 1, UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1333,15 +1334,15 @@ public class BlockManager {
   }
 
   /**
-   * Scan blocks in {@link #neededReplications} and assign replication
-   * work to data-nodes they belong to.
+   * Scan blocks in {@link #neededReplications} and assign recovery
+   * (replication or erasure coding) work to data-nodes they belong to.
*
* The number of process blocks equals either twice the number of live
* data-nodes or the number of under-replicated blocks whichever is less.
*
* @return number of blocks scheduled for replication during this iteration.
*/
-  int computeReplicationWork(int blocksToProcess) {
+  int computeBlockRecoveryWork(int blocksToProcess) {
 List<List<BlockInfo>> 

[06/50] hadoop git commit: HDFS-7716. Add a test for BlockGroup support in FSImage. Contributed by Takuya Fukudome

2015-04-20 Thread zhz
HDFS-7716. Add a test for BlockGroup support in FSImage.  Contributed by Takuya 
Fukudome


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bd49cbb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bd49cbb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bd49cbb

Branch: refs/heads/HDFS-7285
Commit: 1bd49cbb6fa03bdf6b4856bbce640fcce7c0de09
Parents: 9b73f1c
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Wed Mar 25 19:01:03 2015 +0900
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:04 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  6 ++-
 .../hdfs/server/namenode/TestFSImage.java   | 53 
 2 files changed, 58 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bd49cbb/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 2ef8527..21e4c03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -1,4 +1,8 @@
   BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
 
 HDFS-7347. Configurable erasure coding policy for individual files and
-directories ( Zhe Zhang via vinayakumarb )
\ No newline at end of file
+directories ( Zhe Zhang via vinayakumarb )
+
+HDFS-7716. Add a test for BlockGroup support in FSImage.
+(Takuya Fukudome via szetszwo)
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bd49cbb/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
index 71dc978..440f5cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImage.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 
 import java.io.File;
@@ -31,7 +32,12 @@ import java.io.ByteArrayInputStream;
 import java.io.IOException;
 import java.util.EnumSet;
 
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.junit.Assert;
 
@@ -46,6 +52,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSOutputStream;
+import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -378,4 +385,50 @@ public class TestFSImage {
   FileUtil.fullyDelete(dfsDir);
 }
   }
+
+  /**
+   * Ensure that FSImage supports BlockGroup.
+   */
+  @Test
+  public void testSupportBlockGroup() throws IOException {
+final short GROUP_SIZE = HdfsConstants.NUM_DATA_BLOCKS +
+HdfsConstants.NUM_PARITY_BLOCKS;
+final int BLOCK_SIZE = 8 * 1024 * 1024;
+Configuration conf = new HdfsConfiguration();
+conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(conf).numDataNodes(GROUP_SIZE)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  fs.setStoragePolicy(new Path("/"), HdfsConstants.EC_STORAGE_POLICY_NAME);
+  Path file = new Path("/striped");
+  FSDataOutputStream out = fs.create(file);
+  byte[] bytes = DFSTestUtil.generateSequentialBytes(0, BLOCK_SIZE);
+  out.write(bytes);
+  out.close();
+
+  fs.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+  fs.saveNamespace();
+  fs.setSafeMode(SafeModeAction.SAFEMODE_LEAVE);
+
+  cluster.restartNameNodes();
+  fs = cluster.getFileSystem();
+  

[18/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging (this commit is for conflicts from HDFS-6945). Contributed by Zhe Zhang.

2015-04-20 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging (this 
commit is for conflicts from HDFS-6945). Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dcb72b01
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dcb72b01
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dcb72b01

Branch: refs/heads/HDFS-7285
Commit: dcb72b01a412d4f9eef8b875244c6a8c6f6673a3
Parents: f3fefb8
Author: Zhe Zhang z...@apache.org
Authored: Thu Apr 2 11:25:58 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:07 2015 -0700

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dcb72b01/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 48ddab5..eebf8d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3684,7 +3684,7 @@ public class BlockManager {
   private void removeFromExcessReplicateMap(Block block) {
 for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
   String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
-  LightWeightLinkedSet<Block> excessReplicas = 
excessReplicateMap.get(uuid);
+  LightWeightLinkedSet<BlockInfo> excessReplicas = 
excessReplicateMap.get(uuid);
   if (excessReplicas != null) {
 if (excessReplicas.remove(block)) {
   excessBlocksCount.decrementAndGet();



[13/50] hadoop git commit: HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin

2015-04-20 Thread zhz
HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f3fefb82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f3fefb82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f3fefb82

Branch: refs/heads/HDFS-7285
Commit: f3fefb8218a4eab5914b098f09fcc9172a580ff4
Parents: edd1bc6
Author: Kai Zheng kai.zh...@intel.com
Authored: Thu Apr 2 05:12:35 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:06 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 .../src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java | 4 ++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3fefb82/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index b69e69a..01280db 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -29,3 +29,6 @@
 
 HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng
 ( Kai Zheng )
+
+HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
+( Xinwei Qin via Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f3fefb82/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 8dc3f45..27be00e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -64,7 +64,7 @@ public class ECSchema {
   }
 } catch (NumberFormatException e) {
   throw new IllegalArgumentException("Option value " +
-  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+  options.get(NUM_DATA_UNITS_KEY) + " for " + NUM_DATA_UNITS_KEY +
" is found. It should be an integer");
 }
 
@@ -74,7 +74,7 @@ public class ECSchema {
   }
 } catch (NumberFormatException e) {
   throw new IllegalArgumentException("Option value " +
-  options.get(CHUNK_SIZE_KEY) + " for " + CHUNK_SIZE_KEY +
+  options.get(NUM_PARITY_UNITS_KEY) + " for " + NUM_PARITY_UNITS_KEY +
" is found. It should be an integer");
 }
 



[09/50] hadoop git commit: HDFS-8005. Erasure Coding: simplify striped block recovery work computation and add tests. Contributed by Jing Zhao.

2015-04-20 Thread zhz
HDFS-8005. Erasure Coding: simplify striped block recovery work computation and 
add tests. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/232fae55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/232fae55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/232fae55

Branch: refs/heads/HDFS-7285
Commit: 232fae552e736358f698cf982b9682c3b6b927f4
Parents: ba93dd7
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 30 13:35:36 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:05 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 134 +---
 .../blockmanagement/DatanodeDescriptor.java |  14 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../blockmanagement/TestBlockManager.java   |  33 +--
 .../TestRecoverStripedBlocks.java   | 107 --
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../namenode/TestRecoverStripedBlocks.java  | 210 +++
 7 files changed, 290 insertions(+), 211 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/232fae55/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0af2ce9..48ddab5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -540,7 +540,7 @@ public class BlockManager {
 // source node returned is not used
 chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-new LinkedList<Short>(), 1, UnderReplicatedBlocks.LEVEL);
+new LinkedList<Short>(), UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1382,7 +1382,7 @@ public class BlockManager {
  int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
 int requiredReplication, numEffectiveReplicas;
 List<DatanodeDescriptor> containingNodes;
-BlockCollection bc = null;
+BlockCollection bc;
 int additionalReplRequired;
 
 int scheduledWork = 0;
@@ -1410,13 +1410,10 @@ public class BlockManager {
 containingNodes = new ArrayList<>();
 List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
 NumberReplicas numReplicas = new NumberReplicas();
-List<Short> missingBlockIndices = new LinkedList<>();
-DatanodeDescriptor[] srcNodes;
-int numSourceNodes = bc.isStriped() ?
-HdfsConstants.NUM_DATA_BLOCKS : 1;
-srcNodes = chooseSourceDatanodes(
-block, containingNodes, liveReplicaNodes, numReplicas,
-missingBlockIndices, numSourceNodes, priority);
+List<Short> liveBlockIndices = new ArrayList<>();
+final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
+containingNodes, liveReplicaNodes, numReplicas,
+liveBlockIndices, priority);
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be replicated from any node
   LOG.debug("Block " + block + " cannot be recovered " +
@@ -1448,15 +1445,14 @@ public class BlockManager {
 } else {
   additionalReplRequired = 1; // Needed on a new rack
 }
-if (bc.isStriped()) {
+if (block.isStriped()) {
+  short[] indices = new short[liveBlockIndices.size()];
+  for (int i = 0 ; i < liveBlockIndices.size(); i++) {
+indices[i] = liveBlockIndices.get(i);
+  }
   ErasureCodingWork ecw = new ErasureCodingWork(block, bc, 
srcNodes,
   containingNodes, liveReplicaNodes, additionalReplRequired,
-  priority);
-  short[] missingBlockArray = new 
short[missingBlockIndices.size()];
-  for (int i = 0 ; i < missingBlockIndices.size(); i++) {
-missingBlockArray[i] = missingBlockIndices.get(i);
-  }
-  ecw.setMissingBlockIndices(missingBlockArray);
+  priority, indices);
   recovWork.add(ecw);
 } else {
   recovWork.add(new ReplicationWork(block, bc, 

[17/50] hadoop git commit: HDFS-7839. Erasure coding: implement facilities in NameNode to create and manage EC zones. Contributed by Zhe Zhang

2015-04-20 Thread zhz
HDFS-7839. Erasure coding: implement facilities in NameNode to create and 
manage EC zones. Contributed by Zhe Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6be49e47
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6be49e47
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6be49e47

Branch: refs/heads/HDFS-7285
Commit: 6be49e475a2b4b35daac336f1d01fc421dbb73c0
Parents: dcb72b0
Author: Zhe Zhang z...@apache.org
Authored: Thu Apr 2 22:38:29 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:07 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  15 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java|   8 +
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 -
 ...tNamenodeProtocolServerSideTranslatorPB.java |  14 ++
 .../ClientNamenodeProtocolTranslatorPB.java |  16 ++
 .../BlockStoragePolicySuite.java|   5 -
 .../hdfs/server/common/HdfsServerConstants.java |   2 +
 .../namenode/ErasureCodingZoneManager.java  | 112 ++
 .../hdfs/server/namenode/FSDirRenameOp.java |   2 +
 .../hdfs/server/namenode/FSDirectory.java   |  26 +++-
 .../hdfs/server/namenode/FSNamesystem.java  |  40 +
 .../hadoop/hdfs/server/namenode/INodeFile.java  |  10 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  16 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |   9 ++
 .../hadoop/hdfs/TestBlockStoragePolicy.java |  12 +-
 .../hadoop/hdfs/TestErasureCodingZones.java | 151 +++
 .../TestBlockInitialEncoding.java   |  75 -
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../server/namenode/TestFSEditLogLoader.java|   6 +-
 .../hdfs/server/namenode/TestFSImage.java   |  23 ++-
 .../namenode/TestRecoverStripedBlocks.java  |   7 +-
 21 files changed, 431 insertions(+), 122 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be49e47/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index cc5727f..0185461 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -2945,6 +2945,21 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
+  public void createErasureCodingZone(String src)
+  throws IOException {
+checkOpen();
+TraceScope scope = getPathTraceScope("createErasureCodingZone", src);
+try {
+  namenode.createErasureCodingZone(src);
+} catch (RemoteException re) {
+  throw re.unwrapRemoteException(AccessControlException.class,
+  SafeModeException.class,
+  UnresolvedPathException.class);
+} finally {
+  scope.close();
+}
+  }
+
   public void setXAttr(String src, String name, byte[] value, 
  EnumSet<XAttrSetFlag> flag) throws IOException {
 checkOpen();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be49e47/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index bafb02b..8efe344 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1363,6 +1363,14 @@ public interface ClientProtocol {
   long prevId) throws IOException;
 
   /**
+   * Create an erasure coding zone (currently with hardcoded schema)
+   * TODO: Configurable and pluggable schemas (HDFS-7337)
+   */
+  @Idempotent
+  public void createErasureCodingZone(String src)
+  throws IOException;
+
+  /**
* Set xattr of a file or directory.
* The name must be prefixed with the namespace followed by .. For example,
* user.attr.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6be49e47/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 

[20/50] hadoop git commit: HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng

2015-04-20 Thread zhz
HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da45676e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da45676e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da45676e

Branch: refs/heads/HDFS-7285
Commit: da45676e2ebc79f431a9663c35482444d35952d7
Parents: 68a1599
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Apr 8 01:26:40 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:08 2015 -0700

--
 .../io/erasurecode/coder/RSErasureDecoder.java  |  8 +-
 .../io/erasurecode/coder/RSErasureEncoder.java  |  4 +-
 .../io/erasurecode/coder/XORErasureDecoder.java | 78 
 .../io/erasurecode/coder/XORErasureEncoder.java | 45 ++
 .../io/erasurecode/coder/XorErasureDecoder.java | 78 
 .../io/erasurecode/coder/XorErasureEncoder.java | 45 --
 .../io/erasurecode/rawcoder/JRSRawDecoder.java  | 69 ---
 .../io/erasurecode/rawcoder/JRSRawEncoder.java  | 78 
 .../rawcoder/JRSRawErasureCoderFactory.java | 34 ---
 .../io/erasurecode/rawcoder/RSRawDecoder.java   | 69 +++
 .../io/erasurecode/rawcoder/RSRawEncoder.java   | 78 
 .../rawcoder/RSRawErasureCoderFactory.java  | 34 +++
 .../io/erasurecode/rawcoder/XORRawDecoder.java  | 81 +
 .../io/erasurecode/rawcoder/XORRawEncoder.java  | 61 +
 .../rawcoder/XORRawErasureCoderFactory.java | 34 +++
 .../io/erasurecode/rawcoder/XorRawDecoder.java  | 81 -
 .../io/erasurecode/rawcoder/XorRawEncoder.java  | 61 -
 .../rawcoder/XorRawErasureCoderFactory.java | 34 ---
 .../erasurecode/coder/TestRSErasureCoder.java   |  4 +-
 .../io/erasurecode/coder/TestXORCoder.java  | 50 +++
 .../io/erasurecode/coder/TestXorCoder.java  | 50 ---
 .../erasurecode/rawcoder/TestJRSRawCoder.java   | 93 
 .../io/erasurecode/rawcoder/TestRSRawCoder.java | 93 
 .../erasurecode/rawcoder/TestXORRawCoder.java   | 49 +++
 .../erasurecode/rawcoder/TestXorRawCoder.java   | 51 ---
 25 files changed, 680 insertions(+), 682 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da45676e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
index ba32f04..e2c5051 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
@@ -4,9 +4,9 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.erasurecode.ECBlock;
 import org.apache.hadoop.io.erasurecode.ECBlockGroup;
-import org.apache.hadoop.io.erasurecode.rawcoder.JRSRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RSRawDecoder;
 import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
-import org.apache.hadoop.io.erasurecode.rawcoder.XorRawDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.XORRawDecoder;
 
 /**
  * Reed-Solomon erasure decoder that decodes a block group.
@@ -56,7 +56,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
   rsRawDecoder = createRawDecoder(
   CommonConfigurationKeys.IO_ERASURECODE_CODEC_RS_RAWCODER_KEY);
   if (rsRawDecoder == null) {
-rsRawDecoder = new JRSRawDecoder();
+rsRawDecoder = new RSRawDecoder();
   }
   rsRawDecoder.initialize(getNumDataUnits(),
   getNumParityUnits(), getChunkSize());
@@ -66,7 +66,7 @@ public class RSErasureDecoder extends AbstractErasureDecoder {
 
   private RawErasureDecoder checkCreateXorRawDecoder() {
 if (xorRawDecoder == null) {
-  xorRawDecoder = new XorRawDecoder();
+  xorRawDecoder = new XORRawDecoder();
   xorRawDecoder.initialize(getNumDataUnits(), 1, getChunkSize());
 }
 return xorRawDecoder;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da45676e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureEncoder.java
 

[22/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk changes (this commit is for HDFS-8035). Contributed by Zhe Zhang

2015-04-20 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk 
changes (this commit is for HDFS-8035). Contributed by Zhe Zhang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e0f3de9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e0f3de9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e0f3de9

Branch: refs/heads/HDFS-7285
Commit: 9e0f3de90a46bff1728a532ac9e866349678243a
Parents: 7d09515
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 6 10:37:23 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:08 2015 -0700

--
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java | 11 +--
 .../apache/hadoop/hdfs/server/namenode/FSNamesystem.java |  8 
 2 files changed, 9 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e0f3de9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index eebf8d4..8956cdd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -3546,13 +3546,12 @@ public class BlockManager {
   String src, BlockInfo[] blocks) {
 for (BlockInfo b: blocks) {
   if (!b.isComplete()) {
-final BlockInfoContiguousUnderConstruction uc =
-(BlockInfoContiguousUnderConstruction)b;
 final int numNodes = b.numNodes();
-LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = "
-  + uc.getBlockUCState() + ", replication# = " + numNodes
-  + (numNodes < minReplication ? " < " : " >= ")
-  + " minimum = " + minReplication + ") in file " + src);
+final int min = getMinStorageNum(b);
+final BlockUCState state = b.getBlockUCState();
+LOG.info("BLOCK* " + b + " is not COMPLETE (ucState = " + state
++ ", replication# = " + numNodes + (numNodes < min ? " < " : " >= ")
++ " minimum = " + min + ") in file " + src);
 return false;
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e0f3de9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 335bb4a..21ae985 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3136,7 +3136,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
*/
   LocatedBlock storeAllocatedBlock(String src, long fileId, String clientName,
   ExtendedBlock previous, DatanodeStorageInfo[] targets) throws 
IOException {
-BlockInfo newBlockInfo = null;
+Block newBlock = null;
 long offset;
 checkOperation(OperationCategory.WRITE);
 waitForLoadingFSImage();
@@ -3169,8 +3169,8 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 ExtendedBlock.getLocalBlock(previous));
 
   // allocate new block, record block locations in INode.
-  Block newBlock = createNewBlock(isStriped);
-  newBlockInfo = saveAllocatedBlock(src, fileState.iip, newBlock, targets,
+  newBlock = createNewBlock(isStriped);
+  saveAllocatedBlock(src, fileState.iip, newBlock, targets,
   isStriped);
 
   persistNewBlock(src, pendingFile);
@@ -3181,7 +3181,7 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
 getEditLog().logSync();
 
 // Return located block
-return makeLocatedBlock(newBlockInfo, targets, offset);
+return makeLocatedBlock(getStoredBlock(newBlock), targets, offset);
   }
 
   /*



[24/50] hadoop git commit: HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by Zhe Zhang. Updated CHANGES-HDFS-EC-7285.txt

2015-04-20 Thread zhz
HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by 
Zhe Zhang.
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e36d136
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e36d136
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e36d136

Branch: refs/heads/HDFS-7285
Commit: 3e36d136d5a197677abed02b511e938dfe05fcbf
Parents: 85fe8a5
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 15:35:18 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:09 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 2 ++
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 5 +
 2 files changed, 3 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e36d136/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 68d1d32..7716728 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -33,5 +33,7 @@
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
 ( Xinwei Qin via Kai Zheng )
 
+HADOOP-11740. Combine erasure encoder and decoder interfaces (Zhe Zhang)
+
 HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
 ( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e36d136/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 3874cb4..9927ccf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -49,7 +49,4 @@
 (Hui Zheng via Zhe Zhang)
 
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
-manage EC zones (Zhe Zhang)
-
-HADOOP-11740. Combine erasure encoder and decoder interfaces (Zhe Zhang)
-
+manage EC zones (Zhe Zhang)
\ No newline at end of file



[35/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk changes (this commit mainly addresses HDFS-8081 and HDFS-8048). Contributed by Zhe Zhang.

2015-04-20 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging trunk 
changes (this commit mainly addresses HDFS-8081 and HDFS-8048). Contributed by 
Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec3c6837
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec3c6837
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec3c6837

Branch: refs/heads/HDFS-7285
Commit: ec3c683772ed2417be003446862ef0f4c0196e53
Parents: 414c2ba
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 10:56:24 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:38 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSInputStream.java |  4 ++--
 .../apache/hadoop/hdfs/DFSStripedInputStream.java   | 16 +---
 .../apache/hadoop/hdfs/DFSStripedOutputStream.java  |  3 ++-
 .../hadoop/hdfs/server/namenode/FSNamesystem.java   |  5 +++--
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  3 ++-
 5 files changed, 18 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec3c6837/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 703b42e..d728fda 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1099,7 +1099,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   int offset, MapExtendedBlock, SetDatanodeInfo corruptedBlockMap)
   throws IOException {
 final int length = (int) (end - start + 1);
-actualGetFromOneDataNode(datanode, block, start, end, buf,
+actualGetFromOneDataNode(datanode, blockStartOffset, start, end, buf,
 new int[]{offset}, new int[]{length}, corruptedBlockMap);
   }
 
@@ -1118,7 +1118,7 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
*  block replica
*/
   void actualGetFromOneDataNode(final DNAddrPair datanode,
-  LocatedBlock block, final long startInBlk, final long endInBlk,
+  long blockStartOffset, final long startInBlk, final long endInBlk,
   byte[] buf, int[] offsets, int[] lengths,
   MapExtendedBlock, SetDatanodeInfo corruptedBlockMap)
   throws IOException {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec3c6837/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 077b0f8..8a431b1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -224,7 +224,7 @@ public class DFSStripedInputStream extends DFSInputStream {
* Real implementation of pread.
*/
   @Override
-  protected void fetchBlockByteRange(LocatedBlock block, long start,
+  protected void fetchBlockByteRange(long blockStartOffset, long start,
   long end, byte[] buf, int offset,
   MapExtendedBlock, SetDatanodeInfo corruptedBlockMap)
   throws IOException {
@@ -234,7 +234,7 @@ public class DFSStripedInputStream extends DFSInputStream {
 int len = (int) (end - start + 1);
 
 // Refresh the striped block group
-block = getBlockGroupAt(block.getStartOffset());
+LocatedBlock block = getBlockGroupAt(blockStartOffset);
 assert block instanceof LocatedStripedBlock : NameNode +
  should return a LocatedStripedBlock for a striped file;
 LocatedStripedBlock blockGroup = (LocatedStripedBlock) block;
@@ -254,9 +254,11 @@ public class DFSStripedInputStream extends DFSInputStream {
   DatanodeInfo loc = blks[i].getLocations()[0];
   StorageType type = blks[i].getStorageTypes()[0];
   DNAddrPair dnAddr = new DNAddrPair(loc, NetUtils.createSocketAddr(
-  loc.getXferAddr(dfsClient.getConf().connectToDnViaHostname)), type);
-  CallableVoid readCallable = getFromOneDataNode(dnAddr, blks[i],
-  rp.startOffsetInBlock, rp.startOffsetInBlock + rp.readLength - 1, 
buf,
+  loc.getXferAddr(dfsClient.getConf().isConnectToDnViaHostname())),
+  type);
+  CallableVoid readCallable = getFromOneDataNode(dnAddr,
+ 

[34/50] hadoop git commit: HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R

2015-04-20 Thread zhz
HADOOP-11818 Minor improvements for erasurecode classes. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/742eb948
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/742eb948
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/742eb948

Branch: refs/heads/HDFS-7285
Commit: 742eb94858926e584d123615ad20bdf98cd509f5
Parents: d4f26a9
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 04:31:48 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:37 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt   |  2 ++
 .../hadoop/io/erasurecode/SchemaLoader.java  | 12 ++--
 .../io/erasurecode/coder/RSErasureDecoder.java   | 19 ++-
 .../io/erasurecode/coder/RSErasureEncoder.java   | 19 ++-
 .../io/erasurecode/coder/XORErasureDecoder.java  |  2 +-
 .../io/erasurecode/rawcoder/util/RSUtil.java | 17 +
 6 files changed, 62 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/742eb948/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index c72394e..b850e11 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -40,3 +40,5 @@
 
 HADOOP-11645. Erasure Codec API covering the essential aspects for an 
erasure code
 ( Kai Zheng via vinayakumarb )
+  
+HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via 
Kai Zheng)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742eb948/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
index c51ed37..75dd03a 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
@@ -17,8 +17,8 @@
  */
 package org.apache.hadoop.io.erasurecode;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.w3c.dom.*;
@@ -36,7 +36,7 @@ import java.util.*;
  * A EC schema loading utility that loads predefined EC schemas from XML file
  */
 public class SchemaLoader {
-  private static final Log LOG = 
LogFactory.getLog(SchemaLoader.class.getName());
+  private static final Logger LOG = 
LoggerFactory.getLogger(SchemaLoader.class.getName());
 
   /**
* Load predefined ec schemas from configuration file. This file is
@@ -63,7 +63,7 @@ public class SchemaLoader {
   private ListECSchema loadSchema(File schemaFile)
   throws ParserConfigurationException, IOException, SAXException {
 
-LOG.info(Loading predefined EC schema file  + schemaFile);
+LOG.info(Loading predefined EC schema file {}, schemaFile);
 
 // Read and parse the schema file.
 DocumentBuilderFactory dbf = DocumentBuilderFactory.newInstance();
@@ -87,7 +87,7 @@ public class SchemaLoader {
   ECSchema schema = loadSchema(element);
 schemas.add(schema);
 } else {
-  LOG.warn(Bad element in EC schema configuration file:  +
+  LOG.warn(Bad element in EC schema configuration file: {},
   element.getTagName());
 }
   }
@@ -109,7 +109,7 @@ public class SchemaLoader {
   URL url = Thread.currentThread().getContextClassLoader()
   .getResource(schemaFilePath);
   if (url == null) {
-LOG.warn(schemaFilePath +  not found on the classpath.);
+LOG.warn({} not found on the classpath., schemaFilePath);
 schemaFile = null;
   } else if (! url.getProtocol().equalsIgnoreCase(file)) {
 throw new RuntimeException(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/742eb948/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/RSErasureDecoder.java
 

[30/50] hadoop git commit: HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng

2015-04-20 Thread zhz
HDFS-8074 Define a system-wide default EC schema. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5fe61cb7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5fe61cb7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5fe61cb7

Branch: refs/heads/HDFS-7285
Commit: 5fe61cb72126707281d16671b369608549c9ab1a
Parents: 00c9865
Author: Kai Zheng kai.zh...@intel.com
Authored: Thu Apr 9 01:30:02 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:36 2015 -0700

--
 .../src/main/conf/ecschema-def.xml  |  5 --
 .../apache/hadoop/io/erasurecode/ECSchema.java  | 57 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  4 +-
 .../hdfs/server/namenode/ECSchemaManager.java   | 62 
 4 files changed, 120 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fe61cb7/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
index e619485..e36d386 100644
--- a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
+++ b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
@@ -27,11 +27,6 @@ You can modify and remove those not used yet, or add new 
ones.
 --
 
 schemas
-  schema name=RS-6-3
-k6/k
-m3/m
-codecRS/codec
-  /schema
   schema name=RS-10-4
 k10/k
 m4/m

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fe61cb7/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 27be00e..8c3310e 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -23,12 +23,12 @@ import java.util.Map;
 /**
  * Erasure coding schema to housekeeper relevant information.
  */
-public class ECSchema {
+public final class ECSchema {
   public static final String NUM_DATA_UNITS_KEY = k;
   public static final String NUM_PARITY_UNITS_KEY = m;
   public static final String CODEC_NAME_KEY = codec;
   public static final String CHUNK_SIZE_KEY = chunkSize;
-  public static final int DEFAULT_CHUNK_SIZE = 64 * 1024; // 64K
+  public static final int DEFAULT_CHUNK_SIZE = 256 * 1024; // 256K
 
   private String schemaName;
   private String codecName;
@@ -82,6 +82,18 @@ public class ECSchema {
   }
 
   /**
+   * Constructor with key parameters provided.
+   * @param schemaName
+   * @param codecName
+   * @param numDataUnits
+   * @param numParityUnits
+   */
+  public ECSchema(String schemaName, String codecName,
+  int numDataUnits, int numParityUnits) {
+this(schemaName, codecName, numDataUnits, numParityUnits, null);
+  }
+
+  /**
* Constructor with key parameters provided. Note the options may contain
* additional information for the erasure codec to interpret further.
* @param schemaName
@@ -200,4 +212,45 @@ public class ECSchema {
 
 return sb.toString();
   }
+
+  @Override
+  public boolean equals(Object o) {
+if (this == o) {
+  return true;
+}
+if (o == null || getClass() != o.getClass()) {
+  return false;
+}
+
+ECSchema ecSchema = (ECSchema) o;
+
+if (numDataUnits != ecSchema.numDataUnits) {
+  return false;
+}
+if (numParityUnits != ecSchema.numParityUnits) {
+  return false;
+}
+if (chunkSize != ecSchema.chunkSize) {
+  return false;
+}
+if (!schemaName.equals(ecSchema.schemaName)) {
+  return false;
+}
+if (!codecName.equals(ecSchema.codecName)) {
+  return false;
+}
+return options.equals(ecSchema.options);
+  }
+
+  @Override
+  public int hashCode() {
+int result = schemaName.hashCode();
+result = 31 * result + codecName.hashCode();
+result = 31 * result + options.hashCode();
+result = 31 * result + numDataUnits;
+result = 31 * result + numParityUnits;
+result = 31 * result + chunkSize;
+
+return result;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5fe61cb7/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 

[46/50] hadoop git commit: HDFS-8145. Fix the editlog corruption exposed by failed TestAddStripedBlocks. Contributed by Jing Zhao.

2015-04-20 Thread zhz
HDFS-8145. Fix the editlog corruption exposed by failed TestAddStripedBlocks. 
Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/138e915e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/138e915e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/138e915e

Branch: refs/heads/HDFS-7285
Commit: 138e915eb414f6a3c254d897cdb568a67ff939e7
Parents: a265cf5
Author: Jing Zhao ji...@apache.org
Authored: Fri Apr 17 18:13:47 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:41 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  7 --
 .../namenode/ErasureCodingZoneManager.java  | 12 +-
 .../hdfs/server/namenode/FSDirectory.java   |  6 ++---
 .../hdfs/server/namenode/FSEditLogLoader.java   | 13 ++-
 .../hdfs/server/namenode/FSImageFormat.java |  4 +---
 .../server/namenode/FSImageSerialization.java   | 13 +--
 .../blockmanagement/TestBlockInfoStriped.java   | 23 ++--
 .../hdfs/server/namenode/TestFSImage.java   |  2 +-
 8 files changed, 31 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/138e915e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index 9f2f5ba..23e3153 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -244,13 +244,6 @@ public class BlockInfoStriped extends BlockInfo {
 return num;
   }
 
-  @Override
-  public void write(DataOutput out) throws IOException {
-out.writeShort(dataBlockNum);
-out.writeShort(parityBlockNum);
-super.write(out);
-  }
-
   /**
* Convert a complete block to an under construction block.
* @return BlockInfoUnderConstruction -  an under construction block.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/138e915e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
index 0a84083..3f94227 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ErasureCodingZoneManager.java
@@ -54,10 +54,6 @@ public class ErasureCodingZoneManager {
 this.dir = dir;
   }
 
-  boolean getECPolicy(INodesInPath iip) throws IOException {
-return getECSchema(iip) != null;
-  }
-
   ECSchema getECSchema(INodesInPath iip) throws IOException {
 ECZoneInfo ecZoneInfo = getECZoneInfo(iip);
 return ecZoneInfo == null ? null : ecZoneInfo.getSchema();
@@ -109,7 +105,7 @@ public class ErasureCodingZoneManager {
   throw new IOException(Attempt to create an erasure coding zone  +
   for a file.);
 }
-if (getECPolicy(srcIIP)) {
+if (getECSchema(srcIIP) != null) {
   throw new IOException(Directory  + src +  is already in an  +
   erasure coding zone.);
 }
@@ -132,8 +128,10 @@ public class ErasureCodingZoneManager {
   void checkMoveValidity(INodesInPath srcIIP, INodesInPath dstIIP, String src)
   throws IOException {
 assert dir.hasReadLock();
-if (getECPolicy(srcIIP)
-!= getECPolicy(dstIIP)) {
+final ECSchema srcSchema = getECSchema(srcIIP);
+final ECSchema dstSchema = getECSchema(dstIIP);
+if ((srcSchema != null  !srcSchema.equals(dstSchema)) ||
+(dstSchema != null  !dstSchema.equals(srcSchema))) {
   throw new IOException(
   src +  can't be moved because the source and destination have  +
   different erasure coding policies.);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/138e915e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
 

[45/50] hadoop git commit: HDFS-8166. DFSStripedOutputStream should not create empty blocks. Contributed by Jing Zhao.

2015-04-20 Thread zhz
HDFS-8166. DFSStripedOutputStream should not create empty blocks. Contributed 
by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6d238c82
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6d238c82
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6d238c82

Branch: refs/heads/HDFS-7285
Commit: 6d238c82188f2536dd05cb32d608e2a2270438c9
Parents: 9078837
Author: Jing Zhao ji...@apache.org
Authored: Fri Apr 17 17:55:19 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:41 2015 -0700

--
 .../hadoop/hdfs/DFSStripedOutputStream.java | 163 +++
 .../apache/hadoop/hdfs/StripedDataStreamer.java |  72 +++-
 .../server/blockmanagement/BlockManager.java|  17 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 162 +++---
 4 files changed, 236 insertions(+), 178 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6d238c82/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
index f11a657..7dc0091 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
@@ -22,10 +22,14 @@ import java.io.InterruptedIOException;
 import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
 import java.util.EnumSet;
 import java.util.List;
 import java.util.concurrent.BlockingQueue;
 import java.util.concurrent.LinkedBlockingQueue;
+
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -59,12 +63,12 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
*/
   private int cellSize = HdfsConstants.BLOCK_STRIPED_CELL_SIZE;
   private ByteBuffer[] cellBuffers;
-  private final short blockGroupBlocks = HdfsConstants.NUM_DATA_BLOCKS
+  private final short numAllBlocks = HdfsConstants.NUM_DATA_BLOCKS
   + HdfsConstants.NUM_PARITY_BLOCKS;
-  private final short blockGroupDataBlocks = HdfsConstants.NUM_DATA_BLOCKS;
+  private final short numDataBlocks = HdfsConstants.NUM_DATA_BLOCKS;
   private int curIdx = 0;
   /* bytes written in current block group */
-  private long currentBlockGroupBytes = 0;
+  //private long currentBlockGroupBytes = 0;
 
   //TODO: Use ErasureCoder interface (HDFS-7781)
   private RawErasureEncoder encoder;
@@ -73,10 +77,6 @@ public class DFSStripedOutputStream extends DFSOutputStream {
 return streamers.get(0);
   }
 
-  private long getBlockGroupSize() {
-return blockSize * HdfsConstants.NUM_DATA_BLOCKS;
-  }
-
   /** Construct a new output stream for creating a file. */
   DFSStripedOutputStream(DFSClient dfsClient, String src, HdfsFileStatus stat,
  EnumSetCreateFlag flag, Progressable progress,
@@ -84,15 +84,13 @@ public class DFSStripedOutputStream extends DFSOutputStream 
{
  throws IOException {
 super(dfsClient, src, stat, flag, progress, checksum, favoredNodes);
 DFSClient.LOG.info(Creating striped output stream);
-if (blockGroupBlocks = 1) {
-  throw new IOException(The block group must contain more than one 
block.);
-}
+checkConfiguration();
 
-cellBuffers = new ByteBuffer[blockGroupBlocks];
+cellBuffers = new ByteBuffer[numAllBlocks];
 ListBlockingQueueLocatedBlock stripeBlocks = new ArrayList();
 
-for (int i = 0; i  blockGroupBlocks; i++) {
-  stripeBlocks.add(new 
LinkedBlockingQueueLocatedBlock(blockGroupBlocks));
+for (int i = 0; i  numAllBlocks; i++) {
+  stripeBlocks.add(new LinkedBlockingQueueLocatedBlock(numAllBlocks));
   try {
 cellBuffers[i] = 
ByteBuffer.wrap(byteArrayManager.newByteArray(cellSize));
   } catch (InterruptedException ie) {
@@ -103,29 +101,38 @@ public class DFSStripedOutputStream extends 
DFSOutputStream {
   }
 }
 encoder = new RSRawEncoder();
-encoder.initialize(blockGroupDataBlocks,
-blockGroupBlocks - blockGroupDataBlocks, cellSize);
+encoder.initialize(numDataBlocks,
+numAllBlocks - numDataBlocks, cellSize);
 
-streamers = new ArrayList(blockGroupBlocks);
-for (short i = 0; i  blockGroupBlocks; i++) {
+ListStripedDataStreamer s = new 

[43/50] hadoop git commit: HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the block is a striped block. Contributed by Hui Zheng.

2015-04-20 Thread zhz
HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the block 
is a striped block. Contributed by Hui Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46ce4fe8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46ce4fe8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46ce4fe8

Branch: refs/heads/HDFS-7285
Commit: 46ce4fe83b5deeb3b5c1077562d74b62f7a51e3b
Parents: 0943daf
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 17 12:05:31 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:40 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  |  2 ++
 .../hdfs/server/blockmanagement/BlockManager.java | 18 --
 2 files changed, 6 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46ce4fe8/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 78ca6d3..0ed61cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -85,3 +85,5 @@
 
HDFS-7994. Detect if reserved EC Block ID is already used during namenode
startup. (Hui Zheng via szetszwo)
+
+HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46ce4fe8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 77ce507..df16e42 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -2920,15 +2920,6 @@ public class BlockManager {
   }
 
   /**
-   * Set the value of whether there are any non-EC blocks using StripedID.
-   *
-   * @param has - the value of whether there are any non-EC blocks using 
StripedID.
-   */
-  public void hasNonEcBlockUsingStripedID(boolean has){
-hasNonEcBlockUsingStripedID = has;
-  }
-
-  /**
* Process a single possibly misreplicated block. This adds it to the
* appropriate queues if necessary, and returns a result code indicating
* what happened with it.
@@ -3524,7 +3515,7 @@ public class BlockManager {
 if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
   info = blocksMap.getStoredBlock(
   new Block(BlockIdManager.convertToStripedID(block.getBlockId(;
-  if ((info == null)  hasNonEcBlockUsingStripedID()){
+  if ((info == null)  hasNonEcBlockUsingStripedID){
 info = blocksMap.getStoredBlock(block);
   }
 } else {
@@ -3708,10 +3699,9 @@ public class BlockManager {
*/
   public BlockInfo addBlockCollectionWithCheck(
   BlockInfo block, BlockCollection bc) {
-if (!hasNonEcBlockUsingStripedID()){
-  if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
-hasNonEcBlockUsingStripedID(true);
-  }
+if (!hasNonEcBlockUsingStripedID  !block.isStriped() 
+BlockIdManager.isStripedBlockID(block.getBlockId())) {
+  hasNonEcBlockUsingStripedID = true;
 }
 return addBlockCollection(block, bc);
   }



[49/50] hadoop git commit: HDFS-8181. createErasureCodingZone sets retryCache state as false always (Contributed by Uma Maheswara Rao G)

2015-04-20 Thread zhz
HDFS-8181. createErasureCodingZone sets retryCache state as false always 
(Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf2f940d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf2f940d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf2f940d

Branch: refs/heads/HDFS-7285
Commit: bf2f940da788c6d81ae3abf788aca9eb027493ba
Parents: 3531479
Author: Vinayakumar B vinayakum...@apache.org
Authored: Mon Apr 20 15:04:49 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:42 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt  | 14 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java   |  1 +
 2 files changed, 15 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf2f940d/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 40517e7..c8dbf08 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -83,10 +83,24 @@
 
 HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
 
+HDFS-8120. Erasure coding: created util class to analyze striped block 
groups.
+(Contributed by Zhe Zhang and Li Bo via Jing Zhao)
+
HDFS-7994. Detect if reserved EC Block ID is already used during namenode
startup. (Hui Zheng via szetszwo)
 
 HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).
 
+HDFS-8166. DFSStripedOutputStream should not create empty blocks. (Jing 
Zhao)
+
+HDFS-7937. Erasure Coding: INodeFile quota computation unit tests.
+(Kai Sasaki via Jing Zhao)
+
+HDFS-8145. Fix the editlog corruption exposed by failed 
TestAddStripedBlocks.
+(Jing Zhao)
+
 HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for
 making it ready for transfer to DN (Uma Maheswara Rao G via vinayakumarb)
+
+HDFS-8181. createErasureCodingZone sets retryCache state as false always
+(Uma Maheswara Rao G via vinayakumarb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf2f940d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 932805f..8023889 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -1854,6 +1854,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
 boolean success = false;
 try {
   namesystem.createErasureCodingZone(src, schema, cacheEntry != null);
+  success = true;
 } finally {
   RetryCache.setState(cacheEntry, success);
 }



[36/50] hadoop git commit: HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all ECSchemas loaded in Namenode. (Contributed by Vinayakumar B)

2015-04-20 Thread zhz
HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all ECSchemas 
loaded in Namenode. (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/414c2ba1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/414c2ba1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/414c2ba1

Branch: refs/heads/HDFS-7285
Commit: 414c2ba1d2921c459b7b860e49f5e1c55dffe0c1
Parents: cb71eec
Author: Vinayakumar B vinayakum...@apache.org
Authored: Fri Apr 10 15:07:32 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:38 2015 -0700

--
 .../apache/hadoop/io/erasurecode/ECSchema.java  |  4 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 11 
 .../hadoop/hdfs/protocol/ClientProtocol.java| 10 
 ...tNamenodeProtocolServerSideTranslatorPB.java | 19 +++
 .../ClientNamenodeProtocolTranslatorPB.java | 26 -
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  5 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 17 ++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  9 +++-
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 
 .../hadoop-hdfs/src/main/proto/hdfs.proto   |  3 +-
 .../org/apache/hadoop/hdfs/TestECSchemas.java   | 57 
 12 files changed, 164 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
index 8c3310e..32077f6 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECSchema.java
@@ -123,12 +123,12 @@ public final class ECSchema {
 
 this.chunkSize = DEFAULT_CHUNK_SIZE;
 try {
-  if (options.containsKey(CHUNK_SIZE_KEY)) {
+  if (this.options.containsKey(CHUNK_SIZE_KEY)) {
 this.chunkSize = Integer.parseInt(options.get(CHUNK_SIZE_KEY));
   }
 } catch (NumberFormatException e) {
   throw new IllegalArgumentException(Option value  +
-  options.get(CHUNK_SIZE_KEY) +  for  + CHUNK_SIZE_KEY +
+  this.options.get(CHUNK_SIZE_KEY) +  for  + CHUNK_SIZE_KEY +
is found. It should be an integer);
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 753795a..5250dfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -58,4 +58,7 @@
 
 HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
 
-HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
\ No newline at end of file
+HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
+
+HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
+ECSchemas loaded in Namenode. (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/414c2ba1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 6627124..6a4b3d9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -163,6 +163,7 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.io.retry.LossyRetryInvocationHandler;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -3100,6 +3101,16 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  public ECSchema[] getECSchemas() throws 

[15/50] hadoop git commit: HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits (Vinayakumar B)

2015-04-20 Thread zhz
HDFS-8027. Erasure Coding: Update CHANGES-HDFS-7285.txt with branch commits 
(Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/646f4631
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/646f4631
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/646f4631

Branch: refs/heads/HDFS-7285
Commit: 646f463128fc0eb918bf58aee3bcdff1da369a64
Parents: 232fae5
Author: Vinayakumar B vinayakuma...@intel.com
Authored: Tue Mar 31 15:12:09 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:06 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt| 40 +++-
 1 file changed, 39 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/646f4631/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 21e4c03..a686315 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -3,6 +3,44 @@
 HDFS-7347. Configurable erasure coding policy for individual files and
 directories ( Zhe Zhang via vinayakumarb )
 
-HDFS-7716. Add a test for BlockGroup support in FSImage.
+HDFS-7339. Representing striped block groups in NameNode with hierarchical
+naming protocol ( Zhe Zhang )
+
+HDFS-7652. Process block reports for erasure coded blocks (Zhe Zhang)
+
+HDFS-7716. Erasure Coding: extend BlockInfo to handle EC info (Jing Zhao)
+
+HDFS-7749. Erasure Coding: Add striped block support in INodeFile (Jing 
Zhao)
+
+HDFS-7837. Erasure Coding: allocate and persist striped blocks in NameNode
+(Jing Zhao via Zhe Zhang)
+
+HDFS-7872. Erasure Coding: INodeFile.dumpTreeRecursively() supports to 
print
+striped blocks (Takuya Fukudome via jing9)
+
+HDFS-7853. Erasure coding: extend LocatedBlocks to support reading from
+striped files (Jing Zhao)
+
+HDFS-7826. Erasure Coding: Update INodeFile quota computation for striped
+blocks ( Kai Sasaki via jing9 )
+
+HDFS-7912. Erasure Coding: track BlockInfo instead of Block in
+UnderReplicatedBlocks and PendingReplicationBlocks (Jing Zhao)
+
+HDFS-7369. Erasure coding: distribute recovery work for striped blocks to
+DataNode (Zhe Zhang)
+
+HDFS-7864. Erasure Coding: Update safemode calculation for striped blocks
+(GAO Rui via jing9)
+
+HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage
+( Hui Zheng via jing9 )
+
+HDFS-7616. Add a test for BlockGroup support in FSImage.
 (Takuya Fukudome via szetszwo)
 
+HDFS-7907. Erasure Coding: track invalid, corrupt, and under-recovery 
striped
+blocks in NameNode (Jing Zhao)
+
+HDFS-8005. Erasure Coding: simplify striped block recovery work computation
+and add tests (Jing Zhao)
\ No newline at end of file



[31/50] hadoop git commit: HDFS-8104 Make hard-coded values consistent with the system default schema first before remove them. Contributed by Kai Zheng

2015-04-20 Thread zhz
HDFS-8104 Make hard-coded values consistent with the system default schema 
first before remove them. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4f26a9f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4f26a9f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4f26a9f

Branch: refs/heads/HDFS-7285
Commit: d4f26a9fd9ef38072c3fb7ba742a51144a8b644b
Parents: 5fe61cb
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Apr 10 00:16:28 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:36 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../hadoop/hdfs/protocol/HdfsConstants.java |  12 +-
 .../hadoop/hdfs/TestPlanReadPortions.java   | 142 +++
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 112 ---
 4 files changed, 154 insertions(+), 116 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f26a9f/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5078a15..1e695c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -54,4 +54,6 @@
 HDFS-8023. Erasure Coding: retrieve eraure coding schema for a file from
 NameNode (vinayakumarb)
 
-HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
\ No newline at end of file
+HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
+
+HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f26a9f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index a888aa4..11c5260 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -180,11 +180,17 @@ public class HdfsConstants {
   public static final byte WARM_STORAGE_POLICY_ID = 5;
   public static final byte COLD_STORAGE_POLICY_ID = 2;
 
-  public static final byte NUM_DATA_BLOCKS = 3;
-  public static final byte NUM_PARITY_BLOCKS = 2;
+
   public static final long BLOCK_GROUP_INDEX_MASK = 15;
   public static final byte MAX_BLOCKS_IN_GROUP = 16;
 
+  /*
+   * These values correspond to the values used by the system default schema.
+   * TODO: to be removed once all places use schema.
+   */
+
+  public static final byte NUM_DATA_BLOCKS = 6;
+  public static final byte NUM_PARITY_BLOCKS = 3;
   // The chunk size for striped block which is used by erasure coding
-  public static final int BLOCK_STRIPED_CELL_SIZE = 128 * 1024;
+  public static final int BLOCK_STRIPED_CELL_SIZE = 256 * 1024;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4f26a9f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
new file mode 100644
index 000..cf84b30
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPlanReadPortions.java
@@ -0,0 +1,142 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package 

[04/50] hadoop git commit: HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai Zheng

2015-04-20 Thread zhz
HADOOP-11707. Add factory to create raw erasure coder.  Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/757c3c21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/757c3c21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/757c3c21

Branch: refs/heads/HDFS-7285
Commit: 757c3c2162a3df1b24c008c1d0ce13aa62e25212
Parents: b419130
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Mar 20 15:07:00 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:03 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +-
 .../rawcoder/JRSRawErasureCoderFactory.java | 34 ++
 .../rawcoder/RawErasureCoderFactory.java| 38 
 .../rawcoder/XorRawErasureCoderFactory.java | 34 ++
 4 files changed, 108 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/757c3c21/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index e27ff5c..f566f0e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -24,4 +24,5 @@
 HADOOP-11706. Refine a little bit erasure coder API. Contributed by Kai 
Zheng
 ( Kai Zheng )
 
-
+HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai 
Zheng
+( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757c3c21/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
new file mode 100644
index 000..d6b40aa
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/JRSRawErasureCoderFactory.java
@@ -0,0 +1,34 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.rawcoder;
+
+/**
+ * A raw coder factory for raw Reed-Solomon coder in Java.
+ */
+public class JRSRawErasureCoderFactory implements RawErasureCoderFactory {
+
+  @Override
+  public RawErasureEncoder createEncoder() {
+return new JRSRawEncoder();
+  }
+
+  @Override
+  public RawErasureDecoder createDecoder() {
+return new JRSRawDecoder();
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/757c3c21/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
new file mode 100644
index 000..95a1cfe
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/rawcoder/RawErasureCoderFactory.java
@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in 

[41/50] hadoop git commit: HDFS-8114. Erasure coding: Add auditlog FSNamesystem#createErasureCodingZone if this operation fails. Contributed by Rakesh R.

2015-04-20 Thread zhz
HDFS-8114. Erasure coding: Add auditlog FSNamesystem#createErasureCodingZone if 
this operation fails. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44074838
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44074838
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44074838

Branch: refs/heads/HDFS-7285
Commit: 4407483885d7eb57c6ffac650684e1df16acaa59
Parents: d4c8f10
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 11:15:02 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:39 2015 -0700

--
 .../hdfs/server/namenode/FSNamesystem.java  | 21 ++--
 1 file changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44074838/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 6cb0436..0d51d59 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -8120,11 +8120,19 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   SafeModeException, AccessControlException {
 String src = srcArg;
 HdfsFileStatus resultingStat = null;
-checkSuperuserPrivilege();
-checkOperation(OperationCategory.WRITE);
-final byte[][] pathComponents =
-FSDirectory.getPathComponentsForReservedPath(src);
-FSPermissionChecker pc = getPermissionChecker();
+FSPermissionChecker pc = null;
+byte[][] pathComponents = null;
+boolean success = false;
+try {
+  checkSuperuserPrivilege();
+  checkOperation(OperationCategory.WRITE);
+  pathComponents =
+  FSDirectory.getPathComponentsForReservedPath(src);
+  pc = getPermissionChecker();
+} catch (Throwable e) {
+  logAuditEvent(success, createErasureCodingZone, srcArg);
+  throw e;
+}
 writeLock();
 try {
   checkSuperuserPrivilege();
@@ -8138,11 +8146,12 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
   final INodesInPath iip = dir.getINodesInPath4Write(src, false);
   resultingStat = dir.getAuditFileInfo(iip);
+  success = true;
 } finally {
   writeUnlock();
 }
 getEditLog().logSync();
-logAuditEvent(true, createErasureCodingZone, srcArg, null, 
resultingStat);
+logAuditEvent(success, createErasureCodingZone, srcArg, null, 
resultingStat);
   }
 
   /**



[42/50] hadoop git commit: HDFS-7994. Detect if reserved EC Block ID is already used during namenode startup. Contributed by Hui Zheng

2015-04-20 Thread zhz
HDFS-7994. Detect if reserved EC Block ID is already used during namenode 
startup. Contributed by Hui Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0943daf6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0943daf6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0943daf6

Branch: refs/heads/HDFS-7285
Commit: 0943daf6b06496d2e01799296d90a08fbdf73e87
Parents: ff7de64
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Thu Apr 16 13:16:37 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:40 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   5 +-
 .../server/blockmanagement/BlockManager.java|  42 -
 .../hdfs/server/namenode/FSEditLogLoader.java   |   4 +-
 .../hdfs/server/namenode/FSImageFormat.java |   6 +-
 .../server/namenode/FSImageFormatPBINode.java   |   2 +-
 .../snapshot/FSImageFormatPBSnapshot.java   |   2 +-
 .../server/namenode/TestFSEditLogLoader.java| 106 
 .../hdfs/server/namenode/TestFSImage.java   | 169 ++-
 8 files changed, 321 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0943daf6/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index b9fc6fa..78ca6d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -81,4 +81,7 @@
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
 separate erasurecoding proto file (Rakesh R via vinayakumarb)
 
-HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
\ No newline at end of file
+HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
+
+HDFS-7994. Detect if resevered EC Block ID is already used during namenode
+startup. (Hui Zheng via szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0943daf6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 552435e..77ce507 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -273,6 +273,9 @@ public class BlockManager {
   /** Check whether name system is running before terminating */
   private boolean checkNSRunning = true;
 
+  /** Check whether there are any non-EC blocks using StripedID */
+  private boolean hasNonEcBlockUsingStripedID = false;
+
   public BlockManager(final Namesystem namesystem, final Configuration conf)
 throws IOException {
 this.namesystem = namesystem;
@@ -2908,6 +2911,24 @@ public class BlockManager {
   }
 
   /**
+   * Get the value of whether there are any non-EC blocks using StripedID.
+   *
+   * @return Returns the value of whether there are any non-EC blocks using 
StripedID.
+   */
+  public boolean hasNonEcBlockUsingStripedID(){
+return hasNonEcBlockUsingStripedID;
+  }
+
+  /**
+   * Set the value of whether there are any non-EC blocks using StripedID.
+   *
+   * @param has - the value of whether there are any non-EC blocks using 
StripedID.
+   */
+  public void hasNonEcBlockUsingStripedID(boolean has){
+hasNonEcBlockUsingStripedID = has;
+  }
+
+  /**
* Process a single possibly misreplicated block. This adds it to the
* appropriate queues if necessary, and returns a result code indicating
* what happened with it.
@@ -3503,8 +3524,10 @@ public class BlockManager {
 if (BlockIdManager.isStripedBlockID(block.getBlockId())) {
   info = blocksMap.getStoredBlock(
   new Block(BlockIdManager.convertToStripedID(block.getBlockId(;
-}
-if (info == null) {
+  if ((info == null)  hasNonEcBlockUsingStripedID()){
+info = blocksMap.getStoredBlock(block);
+  }
+} else {
   info = blocksMap.getStoredBlock(block);
 }
 return info;
@@ -3678,6 +3701,21 @@ public class BlockManager {
 return blocksMap.addBlockCollection(block, bc);
   }
 
+  /**
+   * Do some check when adding a block to blocksmap.
+   * For HDFS-7994 to check whether then block is a NonEcBlockUsingStripedID.
+   *
+   */
+  public BlockInfo 

[50/50] hadoop git commit: HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for making it ready for transfer to DN (Contributed by Uma Maheswara Rao G)

2015-04-20 Thread zhz
HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for 
making it ready for transfer to DN (Contributed by Uma Maheswara Rao G)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/35314790
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/35314790
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/35314790

Branch: refs/heads/HDFS-7285
Commit: 35314790f7b01f9fcfc6ccb23c9622e91b717398
Parents: 138e915
Author: Vinayakumar B vinayakum...@apache.org
Authored: Sat Apr 18 23:20:45 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:42 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   3 +
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 137 ++-
 .../blockmanagement/DatanodeDescriptor.java |  31 +
 .../server/blockmanagement/DatanodeManager.java |   4 +-
 .../server/protocol/BlockECRecoveryCommand.java |  80 ++-
 .../hdfs/server/protocol/DatanodeProtocol.java  |   2 +-
 .../src/main/proto/DatanodeProtocol.proto   |   8 ++
 .../src/main/proto/erasurecoding.proto  |  13 ++
 .../hadoop/hdfs/protocolPB/TestPBHelper.java|  88 
 .../namenode/TestRecoverStripedBlocks.java  |  10 +-
 10 files changed, 335 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/35314790/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 0ed61cd..40517e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -87,3 +87,6 @@
 startup. (Hui Zheng via szetszwo)
 
 HDFS-8167. BlockManager.addBlockCollectionWithCheck should check if the 
block is a striped block. (Hui Zheng via zhz).
+
+HDFS-8146. Protobuf changes for BlockECRecoveryCommand and its fields for
+making it ready for transfer to DN (Uma Maheswara Rao G via vinayakumarb)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/35314790/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index b9d87aa..0c6c97d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -28,6 +28,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collection;
 import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
@@ -100,7 +101,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTyp
 import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos;
+import org.apache.hadoop.hdfs.protocol.proto.*;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
@@ -121,6 +122,7 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmI
 import 
org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ShortCircuitShmSlotProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BalancerBandwidthCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockCommandProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockECRecoveryCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockIdCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.BlockRecoveryCommandProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.DatanodeCommandProto;
@@ -132,11 +134,11 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.ReceivedDele
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.RegisterCommandProto;
 import 

[28/50] hadoop git commit: HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from NameNode (Contributed by Vinayakumar B)

2015-04-20 Thread zhz
HDFS-8023. Erasure Coding: retrieve erasure coding schema for a file from 
NameNode (Contributed by Vinayakumar B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a5cfca2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a5cfca2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a5cfca2

Branch: refs/heads/HDFS-7285
Commit: 2a5cfca2f3856b19f5b2d2f66ba9c4f3e02c5fc0
Parents: 06987aa
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 12:48:59 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:31 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 ++-
 .../java/org/apache/hadoop/hdfs/DFSClient.java  | 14 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java| 10 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 19 
 .../ClientNamenodeProtocolTranslatorPB.java | 18 
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 46 
 .../hdfs/server/namenode/FSNamesystem.java  | 31 +
 .../hdfs/server/namenode/NameNodeRpcServer.java |  7 +++
 .../src/main/proto/ClientNamenodeProtocol.proto | 10 +
 .../hadoop-hdfs/src/main/proto/hdfs.proto   | 28 
 .../hadoop/hdfs/TestErasureCodingZones.java | 38 +++-
 11 files changed, 223 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a5cfca2/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9927ccf..7423033 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -49,4 +49,7 @@
 (Hui Zheng via Zhe Zhang)
 
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
-manage EC zones (Zhe Zhang)
\ No newline at end of file
+manage EC zones (Zhe Zhang)
+
+HDFS-8023. Erasure Coding: retrieve eraure coding schema for a file from
+NameNode (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a5cfca2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index f7b48eb..6627124 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -117,6 +117,7 @@ import 
org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.ECInfo;
 import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.EncryptionZoneIterator;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -3086,6 +3087,19 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
+  public ECInfo getErasureCodingInfo(String src) throws IOException {
+checkOpen();
+TraceScope scope = getPathTraceScope(getErasureCodingInfo, src);
+try {
+  return namenode.getErasureCodingInfo(src);
+} catch (RemoteException re) {
+  throw re.unwrapRemoteException(AccessControlException.class,
+  FileNotFoundException.class, UnresolvedPathException.class);
+} finally {
+  scope.close();
+}
+  }
+
   public DFSInotifyEventInputStream getInotifyEventStream() throws IOException 
{
 return new DFSInotifyEventInputStream(traceSampler, namenode);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a5cfca2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 8efe344..45d92f3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1464,4 +1464,14 @@ public interface ClientProtocol {
*/
   @Idempotent
   public EventBatchList getEditsFromTxid(long txid) 

[23/50] hadoop git commit: HADOOP-11645. Erasure Codec API covering the essential aspects for an erasure code ( Contributed by Kai Zheng)

2015-04-20 Thread zhz
HADOOP-11645. Erasure Codec API covering the essential aspects for an erasure 
code ( Contributed by Kai Zheng)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ae31ea95
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ae31ea95
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ae31ea95

Branch: refs/heads/HDFS-7285
Commit: ae31ea95910bed8b70a3da8e290e1082fa58872e
Parents: 3e36d13
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 16:05:22 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:09 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +
 .../hadoop/io/erasurecode/ECBlockGroup.java | 18 
 .../erasurecode/codec/AbstractErasureCodec.java | 88 +++
 .../io/erasurecode/codec/ErasureCodec.java  | 56 
 .../io/erasurecode/codec/RSErasureCodec.java| 38 +
 .../io/erasurecode/codec/XORErasureCodec.java   | 45 ++
 .../erasurecode/coder/AbstractErasureCoder.java |  7 ++
 .../io/erasurecode/coder/ErasureCoder.java  |  7 ++
 .../io/erasurecode/grouper/BlockGrouper.java| 90 
 9 files changed, 352 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae31ea95/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 7716728..c72394e 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -37,3 +37,6 @@
 
 HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
 ( Kai Zheng )
+
+HADOOP-11645. Erasure Codec API covering the essential aspects for an 
erasure code
+( Kai Zheng via vinayakumarb )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae31ea95/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
index 2c851a5..0a86907 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/ECBlockGroup.java
@@ -79,4 +79,22 @@ public class ECBlockGroup {
 return false;
   }
 
+  /**
+   * Get erased blocks count
+   * @return
+   */
+  public int getErasedCount() {
+int erasedCount = 0;
+
+for (ECBlock dataBlock : dataBlocks) {
+  if (dataBlock.isErased()) erasedCount++;
+}
+
+for (ECBlock parityBlock : parityBlocks) {
+  if (parityBlock.isErased()) erasedCount++;
+}
+
+return erasedCount;
+  }
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ae31ea95/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
new file mode 100644
index 000..9993786
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/AbstractErasureCodec.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * License); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an AS IS BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.codec;
+
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.coder.*;
+import 

[25/50] hadoop git commit: HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by Xinwei Qin Updated CHANGES-HDFS-EC-7285.txt

2015-04-20 Thread zhz
HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85fe8a57
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85fe8a57
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85fe8a57

Branch: refs/heads/HDFS-7285
Commit: 85fe8a57943dcd59ece2277c9df12e9598c10402
Parents: 5eef793
Author: Vinayakumar B vinayakum...@apache.org
Authored: Tue Apr 7 15:34:37 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:09 2015 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt | 3 ---
 1 file changed, 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85fe8a57/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 4e60a7c..3874cb4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -48,9 +48,6 @@
 HDFS-7617. Add unit tests for editlog transactions for EC 
 (Hui Zheng via Zhe Zhang)
 
-HADOOP-11782. Correct two thrown messages in ECSchema class
-(Xinwei Qin via Kai Zheng)
-
 HDFS-7839. Erasure coding: implement facilities in NameNode to create and
 manage EC zones (Zhe Zhang)
 



[12/50] hadoop git commit: HADOOP-11664. Loading predefined EC schemas from configuration. Contributed by Kai Zheng.

2015-04-20 Thread zhz
HADOOP-11664. Loading predefined EC schemas from configuration. Contributed by 
Kai Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c58b6115
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c58b6115
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c58b6115

Branch: refs/heads/HDFS-7285
Commit: c58b6115fec95d5563f683dd36e24cdce8e7682c
Parents: 1bd49cb
Author: Zhe Zhang z...@apache.org
Authored: Fri Mar 27 14:52:50 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:05 2015 -0700

--
 .../src/main/conf/ecschema-def.xml  |  40 +
 .../hadoop/fs/CommonConfigurationKeys.java  |   5 +
 .../hadoop/io/erasurecode/SchemaLoader.java | 147 +++
 .../hadoop/io/erasurecode/TestSchemaLoader.java |  80 ++
 4 files changed, 272 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58b6115/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
new file mode 100644
index 000..e619485
--- /dev/null
+++ b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
@@ -0,0 +1,40 @@
+?xml version=1.0?
+
+!--
+ 
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements.  See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership.  The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ License); you may not use this file except in compliance
+ with the License.  You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an AS IS BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+--
+
+!--
+Please define your EC schemas here. Note, once these schemas are loaded
+and referenced by EC storage policies, any change to them will be ignored.
+You can modify and remove those not used yet, or add new ones.
+--
+
+schemas
+  schema name=RS-6-3
+k6/k
+m3/m
+codecRS/codec
+  /schema
+  schema name=RS-10-4
+k10/k
+m4/m
+codecRS/codec
+  /schema
+/schemas
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58b6115/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 70fea01..af32674 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -141,6 +141,11 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   /** Supported erasure codec classes */
   public static final String IO_ERASURECODE_CODECS_KEY = 
io.erasurecode.codecs;
 
+  public static final String IO_ERASURECODE_SCHEMA_FILE_KEY =
+  io.erasurecode.schema.file;
+  public static final String IO_ERASURECODE_SCHEMA_FILE_DEFAULT =
+  ecschema-def.xml;
+
   /** Use XOR raw coder when possible for the RS codec */
   public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
   io.erasurecode.codec.rs.usexor;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58b6115/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
new file mode 100644
index 000..c51ed37
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ 

[38/50] hadoop git commit: HDFS-8123. Erasure Coding: Better to move EC related proto messages to a separate erasurecoding proto file (Contributed by Rakesh R)

2015-04-20 Thread zhz
HDFS-8123. Erasure Coding: Better to move EC related proto messages to a 
separate erasurecoding proto file (Contributed by Rakesh R)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8666008b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8666008b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8666008b

Branch: refs/heads/HDFS-7285
Commit: 8666008b3bef57270bbc192b837848efcfc473d7
Parents: 4407483
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 12:09:16 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:39 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|  5 +-
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  1 +
 ...tNamenodeProtocolServerSideTranslatorPB.java | 12 ++--
 .../ClientNamenodeProtocolTranslatorPB.java | 13 ++--
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  6 +-
 .../namenode/ErasureCodingZoneManager.java  |  2 +-
 .../src/main/proto/ClientNamenodeProtocol.proto | 24 +--
 .../src/main/proto/erasurecoding.proto  | 74 
 .../hadoop-hdfs/src/main/proto/hdfs.proto   | 27 ---
 9 files changed, 96 insertions(+), 68 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8666008b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 5250dfa..07bbd4a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -61,4 +61,7 @@
 HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
 
 HDFS-8090. Erasure Coding: Add RPC to client-namenode to list all
-ECSchemas loaded in Namenode. (vinayakumarb)
\ No newline at end of file
+ECSchemas loaded in Namenode. (vinayakumarb)
+
+HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
+separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8666008b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index c11b963..a13a2bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -343,6 +343,7 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd;
   includehdfs.proto/include
   includeencryption.proto/include
   includeinotify.proto/include
+  includeerasurecoding.proto/include
 /includes
   /source
   
output${project.build.directory}/generated-sources/java/output

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8666008b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
index 48f0efd..169ea2d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
@@ -107,12 +107,8 @@ import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDat
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeStorageReportResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetECSchemasResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetEditsFromTxidResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetErasureCodingInfoRequestProto;
-import 

[26/50] hadoop git commit: HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by Zhe Zhang and Jing Zhao

2015-04-20 Thread zhz
HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by 
Zhe Zhang and Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7126858a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7126858a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7126858a

Branch: refs/heads/HDFS-7285
Commit: 7126858a9ce84a7ecc7b881e46338902fb3735d2
Parents: ae31ea9
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 7 11:20:13 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:26:38 2015 -0700

--
 .../hadoop/hdfs/protocol/LocatedBlock.java  |   4 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  55 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |  79 +++-
 .../hadoop/hdfs/DFSStripedInputStream.java  | 367 +++
 .../hadoop/hdfs/protocol/HdfsConstants.java |   2 +-
 .../hdfs/protocol/LocatedStripedBlock.java  |   5 +
 .../blockmanagement/BlockInfoStriped.java   |   6 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  92 -
 .../apache/hadoop/hdfs/TestReadStripedFile.java | 304 +++
 .../namenode/TestRecoverStripedBlocks.java  |  88 +
 11 files changed, 897 insertions(+), 113 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7126858a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
index 4e8f202..a9596bf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java
@@ -203,4 +203,8 @@ public class LocatedBlock {
 + ; locs= + Arrays.asList(locs)
 + };
   }
+
+  public boolean isStriped() {
+return false;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7126858a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 0185461..49a23d8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -236,6 +236,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   private static final DFSHedgedReadMetrics HEDGED_READ_METRIC =
   new DFSHedgedReadMetrics();
   private static ThreadPoolExecutor HEDGED_READ_THREAD_POOL;
+  private static volatile ThreadPoolExecutor STRIPED_READ_THREAD_POOL;
   private final Sampler? traceSampler;
 
   public DfsClientConf getConf() {
@@ -371,6 +372,19 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 if (dfsClientConf.getHedgedReadThreadpoolSize()  0) {
   
this.initThreadsNumForHedgedReads(dfsClientConf.getHedgedReadThreadpoolSize());
 }
+numThreads = conf.getInt(
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE,
+DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+if (numThreads = 0) {
+  LOG.warn(The value of 
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_SIZE
+  +  must be greater than 0. The current setting is  + numThreads
+  + . Reset it to the default value 
+  + DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE);
+  numThreads =
+  DFSConfigKeys.DFS_CLIENT_STRIPED_READ_THREADPOOL_MAX_DEFAULT_SIZE;
+}
+this.initThreadsNumForStripedReads(numThreads);
 this.saslClient = new SaslDataTransferClient(
   conf, DataTransferSaslUtil.getSaslPropertiesResolver(conf),
   TrustedChannelResolver.getInstance(conf), nnFallbackToSimpleAuth);
@@ -3142,11 +3156,52 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   LOG.debug(Using hedged reads; pool threads= + num);
 }
   }
+  
+  /**
+   * Create thread pool for parallel reading in striped layout,
+   * STRIPED_READ_THREAD_POOL, if it does not already exist.
+   * @param num Number of threads for striped reads thread pool.
+   */
+  private void initThreadsNumForStripedReads(int num) {
+assert num  0;
+if (STRIPED_READ_THREAD_POOL != null) {
+ 

[47/50] hadoop git commit: HADOOP-11841. Remove unused ecschema-def.xml files.

2015-04-20 Thread zhz
HADOOP-11841. Remove unused ecschema-def.xml files.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/90788377
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/90788377
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/90788377

Branch: refs/heads/HDFS-7285
Commit: 907883773bcb2dfce6d4177bc3018fff47eed0c8
Parents: 46ce4fe
Author: Tsz-Wo Nicholas Sze szets...@hortonworks.com
Authored: Fri Apr 17 16:07:07 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:41 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  2 ++
 .../src/main/conf/ecschema-def.xml  | 35 ---
 .../hadoop/fs/CommonConfigurationKeys.java  |  5 ---
 .../hadoop/io/erasurecode/SchemaLoader.java | 36 +++-
 .../hadoop/io/erasurecode/TestSchemaLoader.java | 12 ++-
 5 files changed, 25 insertions(+), 65 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/90788377/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index b850e11..9749270 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -42,3 +42,5 @@
 ( Kai Zheng via vinayakumarb )
   
 HADOOP-11818. Minor improvements for erasurecode classes. (Rakesh R via 
Kai Zheng)
+
+HADOOP-11841. Remove unused ecschema-def.xml files.  (szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90788377/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
--
diff --git a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml 
b/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
deleted file mode 100644
index e36d386..000
--- a/hadoop-common-project/hadoop-common/src/main/conf/ecschema-def.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-?xml version=1.0?
-
-!--
- 
- Licensed to the Apache Software Foundation (ASF) under one
- or more contributor license agreements.  See the NOTICE file
- distributed with this work for additional information
- regarding copyright ownership.  The ASF licenses this file
- to you under the Apache License, Version 2.0 (the
- License); you may not use this file except in compliance
- with the License.  You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an AS IS BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
---
-
-!--
-Please define your EC schemas here. Note, once these schemas are loaded
-and referenced by EC storage policies, any change to them will be ignored.
-You can modify and remove those not used yet, or add new ones.
---
-
-schemas
-  schema name=RS-10-4
-k10/k
-m4/m
-codecRS/codec
-  /schema
-/schemas
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90788377/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index af32674..70fea01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -141,11 +141,6 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   /** Supported erasure codec classes */
   public static final String IO_ERASURECODE_CODECS_KEY = 
io.erasurecode.codecs;
 
-  public static final String IO_ERASURECODE_SCHEMA_FILE_KEY =
-  io.erasurecode.schema.file;
-  public static final String IO_ERASURECODE_SCHEMA_FILE_DEFAULT =
-  ecschema-def.xml;
-
   /** Use XOR raw coder when possible for the RS codec */
   public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
   io.erasurecode.codec.rs.usexor;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/90788377/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/SchemaLoader.java
--
diff 

[44/50] hadoop git commit: HDFS-8120. Erasure coding: created util class to analyze striped block groups. Contributed by Zhe Zhang and Li Bo.

2015-04-20 Thread zhz
HDFS-8120. Erasure coding: created util class to analyze striped block groups. 
Contributed by Zhe Zhang and Li Bo.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff7de640
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff7de640
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff7de640

Branch: refs/heads/HDFS-7285
Commit: ff7de64055b21d6557c1f7941e076ac9c2465171
Parents: 58cf954
Author: Jing Zhao ji...@apache.org
Authored: Wed Apr 15 12:59:27 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:40 2015 -0700

--
 .../org/apache/hadoop/hdfs/DFSInputStream.java  |   4 +-
 .../hadoop/hdfs/DFSStripedInputStream.java  |  77 +++
 .../hadoop/hdfs/DFSStripedOutputStream.java |  34 +++--
 .../apache/hadoop/hdfs/StripedDataStreamer.java |  58 ++--
 .../server/blockmanagement/BlockManager.java|  26 +++-
 .../hadoop/hdfs/util/StripedBlockUtil.java  | 138 +++
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  91 +++-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  83 +--
 .../apache/hadoop/hdfs/TestReadStripedFile.java |  92 +++--
 .../server/namenode/TestAddStripedBlocks.java   | 107 ++
 .../namenode/TestRecoverStripedBlocks.java  |   3 +-
 .../hadoop/hdfs/util/TestStripedBlockUtil.java  | 125 +
 12 files changed, 562 insertions(+), 276 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff7de640/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index d728fda..705e0b7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1169,9 +1169,9 @@ implements ByteBufferReadable, CanSetDropBehind, 
CanSetReadahead,
   int nread = reader.readAll(buf, offsets[i], lengths[i]);
   updateReadStatistics(readStatistics, nread, reader);
 
-  if (nread != len) {
+  if (nread != lengths[i]) {
 throw new IOException(truncated return from reader.read():  +
-excpected  + len + , got  + nread);
+excpected  + lengths[i] + , got  + nread);
   }
 }
 DFSClientFaultInjector.get().readFromDatanodeDelay();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff7de640/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
index 8a431b1..d597407 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSStripedInputStream.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.namenode.UnsupportedActionException;
+import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.htrace.Span;
 import org.apache.htrace.Trace;
@@ -50,7 +51,7 @@ import java.util.concurrent.Future;
  *
  * | - Striped Block Group - |
  *  blk_0  blk_1   blk_2   - A striped block group has
- *|  |   |  {@link #groupSize} blocks
+ *|  |   |  {@link #dataBlkNum} blocks
  *v  v   v
  * +--+   +--+   +--+
  * |cell_0|   |cell_1|   |cell_2|  - The logical read order should be
@@ -72,7 +73,7 @@ import java.util.concurrent.Future;
 public class DFSStripedInputStream extends DFSInputStream {
   /**
* This method plans the read portion from each block in the stripe
-   * @param groupSize The size / width of the striping group
+   * @param dataBlkNum The number of data blocks in the striping group
* @param cellSize The size of each striping cell
* @param startInBlk Starting offset in the striped block
* @param len Length of the read request
@@ -81,29 +82,29 @@ public class DFSStripedInputStream extends DFSInputStream {
* for an individual block in the group
*/
   @VisibleForTesting
-  

[16/50] hadoop git commit: HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by Zhe Zhang.

2015-04-20 Thread zhz
HADOOP-11740. Combine erasure encoder and decoder interfaces. Contributed by 
Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7d09515d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7d09515d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7d09515d

Branch: refs/heads/HDFS-7285
Commit: 7d09515d61b2b56042c04dc6af3762b5a0f53b22
Parents: 6be49e4
Author: Zhe Zhang z...@apache.org
Authored: Fri Apr 3 15:22:50 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:07 2015 -0700

--
 .../coder/AbstractErasureDecoder.java   |  7 ++--
 .../coder/AbstractErasureEncoder.java   |  7 ++--
 .../io/erasurecode/coder/ErasureCoder.java  | 12 ++
 .../io/erasurecode/coder/ErasureDecoder.java| 41 
 .../io/erasurecode/coder/ErasureEncoder.java| 39 ---
 .../erasurecode/coder/TestErasureCoderBase.java | 20 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt| 14 ++-
 7 files changed, 41 insertions(+), 99 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d09515d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
index 54a6d1e..cd31294 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureDecoder.java
@@ -23,13 +23,12 @@ import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 /**
  * An abstract erasure decoder that's to be inherited by new decoders.
  *
- * It implements the {@link ErasureDecoder} interface.
+ * It implements the {@link ErasureCoder} interface.
  */
-public abstract class AbstractErasureDecoder extends AbstractErasureCoder
-implements ErasureDecoder {
+public abstract class AbstractErasureDecoder extends AbstractErasureCoder {
 
   @Override
-  public ErasureCodingStep decode(ECBlockGroup blockGroup) {
+  public ErasureCodingStep calculateCoding(ECBlockGroup blockGroup) {
 // We may have more than this when considering complicate cases. 
HADOOP-11550
 return prepareDecodingStep(blockGroup);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d09515d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
index 09b31e5..a836b75 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureEncoder.java
@@ -23,13 +23,12 @@ import org.apache.hadoop.io.erasurecode.ECBlockGroup;
 /**
  * An abstract erasure encoder that's to be inherited by new encoders.
  *
- * It implements the {@link ErasureEncoder} interface.
+ * It implements the {@link ErasureCoder} interface.
  */
-public abstract class AbstractErasureEncoder extends AbstractErasureCoder
-implements ErasureEncoder {
+public abstract class AbstractErasureEncoder extends AbstractErasureCoder {
 
   @Override
-  public ErasureCodingStep encode(ECBlockGroup blockGroup) {
+  public ErasureCodingStep calculateCoding(ECBlockGroup blockGroup) {
 // We may have more than this when considering complicate cases. 
HADOOP-11550
 return prepareEncodingStep(blockGroup);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7d09515d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
index c5922f3..fb90156 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/ErasureCoder.java
+++ 

[39/50] hadoop git commit: HDFS-7349. Support DFS command for the EC encoding (Contributed by Vinayakumar B)

2015-04-20 Thread zhz
HDFS-7349. Support DFS command for the EC encoding (Contributed by Vinayakumar 
B)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/58cf9548
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/58cf9548
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/58cf9548

Branch: refs/heads/HDFS-7285
Commit: 58cf95485a64b397a27a8093687731fcf5e92cd5
Parents: dd9de3d
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 15 16:38:22 2015 +0530
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:39 2015 -0700

--
 .../main/java/org/apache/hadoop/fs/FsShell.java |   8 +-
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../hadoop-hdfs/src/main/bin/hdfs   |   5 +
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  18 ++
 .../hadoop/hdfs/DistributedFileSystem.java  |  32 +++
 .../hadoop/hdfs/protocol/ClientProtocol.java|   9 +
 .../apache/hadoop/hdfs/protocol/ECZoneInfo.java |  56 +
 ...tNamenodeProtocolServerSideTranslatorPB.java |  18 ++
 .../ClientNamenodeProtocolTranslatorPB.java |  19 ++
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  12 ++
 .../namenode/ErasureCodingZoneManager.java  |  11 +-
 .../hdfs/server/namenode/FSDirectory.java   |  10 +
 .../hdfs/server/namenode/FSNamesystem.java  |  24 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +
 .../hadoop/hdfs/tools/erasurecode/ECCli.java|  48 +
 .../hdfs/tools/erasurecode/ECCommand.java   | 209 +++
 .../src/main/proto/ClientNamenodeProtocol.proto |   2 +
 .../src/main/proto/erasurecoding.proto  |  15 ++
 18 files changed, 502 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/58cf9548/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index db73f6d..f873a01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -111,6 +111,10 @@ public class FsShell extends Configured implements Tool {
 return getTrash().getCurrentTrashDir();
   }
 
+  protected String getUsagePrefix() {
+return usagePrefix;
+  }
+
   // NOTE: Usage/Help are inner classes to allow access to outer methods
   // that access commandFactory
   
@@ -194,7 +198,7 @@ public class FsShell extends Configured implements Tool {
   }
 } else {
   // display help or usage for all commands 
-  out.println(usagePrefix);
+  out.println(getUsagePrefix());
   
   // display list of short usages
   ArrayListCommand instances = new ArrayListCommand();
@@ -218,7 +222,7 @@ public class FsShell extends Configured implements Tool {
   }
 
   private void printInstanceUsage(PrintStream out, Command instance) {
-out.println(usagePrefix +   + instance.getUsage());
+out.println(getUsagePrefix() +   + instance.getUsage());
   }
 
   private void printInstanceHelp(PrintStream out, Command instance) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58cf9548/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 9fdac98..b9fc6fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -79,4 +79,6 @@
 operation fails. (Rakesh R via Zhe Zhang)
 
 HDFS-8123. Erasure Coding: Better to move EC related proto messages to a
-separate erasurecoding proto file (Rakesh R via vinayakumarb)
\ No newline at end of file
+separate erasurecoding proto file (Rakesh R via vinayakumarb)
+
+HDFS-7349. Support DFS command for the EC encoding (vinayakumarb)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/58cf9548/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index f464261..84c79b8 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -134,6 +134,11 @@ case ${COMMAND} in
 hadoop_debug Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS
 HADOOP_OPTS=${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}
   ;;
+  erasurecode)
+

[37/50] hadoop git commit: HDFS-8122. Erasure Coding: Support specifying ECSchema during creation of ECZone. Contributed by Vinayakumar B.

2015-04-20 Thread zhz
HDFS-8122. Erasure Coding: Support specifying ECSchema during creation of 
ECZone. Contributed by Vinayakumar B.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4c8f108
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4c8f108
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4c8f108

Branch: refs/heads/HDFS-7285
Commit: d4c8f108eee75574837487d8331ac5690b3f42c6
Parents: ec3c683
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 13 11:08:57 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:38 2015 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  6 ++--
 .../hadoop/hdfs/DistributedFileSystem.java  | 33 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  6 ++--
 ...tNamenodeProtocolServerSideTranslatorPB.java |  4 ++-
 .../ClientNamenodeProtocolTranslatorPB.java |  5 ++-
 .../namenode/ErasureCodingZoneManager.java  | 30 +-
 .../hdfs/server/namenode/FSDirectory.java   | 22 -
 .../hdfs/server/namenode/FSNamesystem.java  | 19 ++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  6 ++--
 .../src/main/proto/ClientNamenodeProtocol.proto |  1 +
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  2 +-
 .../hadoop/hdfs/TestDFSStripedOutputStream.java |  2 +-
 .../hadoop/hdfs/TestErasureCodingZones.java | 18 +--
 .../server/namenode/TestAddStripedBlocks.java   |  2 +-
 .../server/namenode/TestFSEditLogLoader.java|  4 +--
 .../hdfs/server/namenode/TestFSImage.java   |  4 +--
 16 files changed, 112 insertions(+), 52 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c8f108/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 6a4b3d9..c03cb36 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -1317,7 +1317,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
  Progressable progress,
  int buffersize,
  ChecksumOpt checksumOpt) throws IOException {
-return create(src, permission, flag, createParent, replication, blockSize, 
+return create(src, permission, flag, createParent, replication, blockSize,
 progress, buffersize, checksumOpt, null);
   }
 
@@ -2961,12 +2961,12 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 return new EncryptionZoneIterator(namenode, traceSampler);
   }
 
-  public void createErasureCodingZone(String src)
+  public void createErasureCodingZone(String src, ECSchema schema)
   throws IOException {
 checkOpen();
 TraceScope scope = getPathTraceScope(createErasureCodingZone, src);
 try {
-  namenode.createErasureCodingZone(src);
+  namenode.createErasureCodingZone(src, schema);
 } catch (RemoteException re) {
   throw re.unwrapRemoteException(AccessControlException.class,
   SafeModeException.class,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4c8f108/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index 8e7daf3..5fec339 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -86,6 +86,7 @@ import 
org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import 
org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.erasurecode.ECSchema;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Credentials;
@@ -2260,4 +2261,36 @@ public class DistributedFileSystem extends FileSystem {
   throws IOException {
 return dfs.getInotifyEventStream(lastReadTxid);
   }
+
+  /**
+   * Create the erasurecoding zone
+   * 
+   * @param path Directory to create the ec zone
+   * @param 

[27/50] hadoop git commit: HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by Zhe Zhang and Jing Zhao

2015-04-20 Thread zhz
HDFS-7782. Erasure coding: pread from files in striped layout. Contributed by 
Zhe Zhang and Jing Zhao


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06987aaf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06987aaf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06987aaf

Branch: refs/heads/HDFS-7285
Commit: 06987aaf522ffad65295daaf8f004a4f3f347dfb
Parents: 7126858
Author: Zhe Zhang z...@apache.org
Authored: Tue Apr 7 11:20:13 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:27:31 2015 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/DFSClient.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06987aaf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 49a23d8..f7b48eb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3156,7 +3156,7 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
   LOG.debug(Using hedged reads; pool threads= + num);
 }
   }
-  
+
   /**
* Create thread pool for parallel reading in striped layout,
* STRIPED_READ_THREAD_POOL, if it does not already exist.



[32/50] hadoop git commit: HDFS-7889 Subclass DFSOutputStream to support writing striping layout files. Contributed by Li Bo

2015-04-20 Thread zhz
HDFS-7889 Subclass DFSOutputStream to support writing striping layout files. 
Contributed by Li Bo


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb71eec2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb71eec2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb71eec2

Branch: refs/heads/HDFS-7285
Commit: cb71eec2ce97a201631df494878b06a01a932ce5
Parents: c56577b
Author: Kai Zheng kai.zh...@intel.com
Authored: Sat Apr 11 01:03:37 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:28:37 2015 -0700

--
 .../hadoop-hdfs/CHANGES-HDFS-EC-7285.txt|   4 +-
 .../org/apache/hadoop/hdfs/DFSOutputStream.java |  13 +-
 .../java/org/apache/hadoop/hdfs/DFSPacket.java  |  26 +-
 .../hadoop/hdfs/DFSStripedOutputStream.java | 439 +++
 .../org/apache/hadoop/hdfs/DataStreamer.java|  12 +-
 .../apache/hadoop/hdfs/StripedDataStreamer.java | 241 ++
 .../hadoop/hdfs/TestDFSStripedOutputStream.java | 311 +
 7 files changed, 1031 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb71eec2/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
index 1e695c4..753795a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES-HDFS-EC-7285.txt
@@ -56,4 +56,6 @@
 
 HDFS-8074. Define a system-wide default EC schema. (Kai Zheng)
 
-HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
\ No newline at end of file
+HDFS-8104. Make hard-coded values consistent with the system default 
schema first before remove them. (Kai Zheng)
+
+HDFS-7889. Subclass DFSOutputStream to support writing striping layout 
files. (Li Bo via Kai Zheng)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb71eec2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
index 8cde274..8270331 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
@@ -268,8 +268,14 @@ public class DFSOutputStream extends FSOutputSummer
 }
   }
   Preconditions.checkNotNull(stat, HdfsFileStatus should not be null!);
-  final DFSOutputStream out = new DFSOutputStream(dfsClient, src, stat,
-  flag, progress, checksum, favoredNodes);
+  final DFSOutputStream out;
+  if(stat.getReplication() == 0) {
+out = new DFSStripedOutputStream(dfsClient, src, stat,
+flag, progress, checksum, favoredNodes);
+  } else {
+out = new DFSOutputStream(dfsClient, src, stat,
+flag, progress, checksum, favoredNodes);
+  }
   out.start();
   return out;
 } finally {
@@ -347,6 +353,9 @@ public class DFSOutputStream extends FSOutputSummer
   String[] favoredNodes) throws IOException {
 TraceScope scope =
 dfsClient.getPathTraceScope(newStreamForAppend, src);
+   if(stat.getReplication() == 0) {
+  throw new IOException(Not support appending to a striping layout file 
yet.);
+}
 try {
   final DFSOutputStream out = new DFSOutputStream(dfsClient, src, flags,
   progress, lastBlock, stat, checksum);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb71eec2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
index 22055c3..9cd1ec1 100755
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSPacket.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
 import java.nio.channels.ClosedChannelException;
 import java.util.Arrays;
 
@@ -113,6 +114,19 @@ class DFSPacket {
 dataPos += len;
   }
 
+  synchronized 

[07/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, HDFS-7435, HDFS-7930, HDFS-7960 (this commit is for HDFS-7960)

2015-04-20 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts when merging with HDFS-7903, 
HDFS-7435, HDFS-7930, HDFS-7960 (this commit is for HDFS-7960)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b73f1c1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b73f1c1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b73f1c1

Branch: refs/heads/HDFS-7285
Commit: 9b73f1c1b9c4d704234043a7db3dca3daa4df36b
Parents: da733dc
Author: Zhe Zhang z...@apache.org
Authored: Tue Mar 24 11:39:36 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:04 2015 -0700

--
 .../apache/hadoop/hdfs/server/blockmanagement/BlockManager.java | 4 ++--
 .../blockmanagement/TestNameNodePrunesMissingStorages.java  | 5 -
 .../hadoop/hdfs/server/namenode/TestAddStripedBlocks.java   | 2 +-
 3 files changed, 7 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b73f1c1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6731524..0ac7b64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -1984,10 +1984,10 @@ public class BlockManager {
  longer exists on the DataNode.,
   Long.toHexString(context.getReportId()), zombie.getStorageID());
 assert(namesystem.hasWriteLock());
-IteratorBlockInfoContiguous iter = zombie.getBlockIterator();
+IteratorBlockInfo iter = zombie.getBlockIterator();
 int prevBlocks = zombie.numBlocks();
 while (iter.hasNext()) {
-  BlockInfoContiguous block = iter.next();
+  BlockInfo block = iter.next();
   // We assume that a block can be on only one storage in a DataNode.
   // That's why we pass in the DatanodeDescriptor rather than the
   // DatanodeStorageInfo.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b73f1c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
index 4b97d01..e9329cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestNameNodePrunesMissingStorages.java
@@ -171,9 +171,12 @@ public class TestNameNodePrunesMissingStorages {
   String datanodeUuid;
   // Find the first storage which this block is in.
   try {
+BlockInfo storedBlock =
+cluster.getNamesystem().getBlockManager().
+getStoredBlock(block.getLocalBlock());
 IteratorDatanodeStorageInfo storageInfoIter =
 cluster.getNamesystem().getBlockManager().
-getStorages(block.getLocalBlock()).iterator();
+blocksMap.getStorages(storedBlock).iterator();
 assertTrue(storageInfoIter.hasNext());
 DatanodeStorageInfo info = storageInfoIter.next();
 storageIdToRemove = info.getStorageID();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b73f1c1/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
index 05aec4b..7d7c81e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
@@ -269,7 +269,7 @@ public class TestAddStripedBlocks {
   StorageBlockReport[] reports = {new StorageBlockReport(storage,
   bll)};
   

[08/50] hadoop git commit: HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage. Contributed by Hui Zheng.

2015-04-20 Thread zhz
HDFS-7827. Erasure Coding: support striped blocks in non-protobuf fsimage. 
Contributed by Hui Zheng.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/da733dc2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/da733dc2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/da733dc2

Branch: refs/heads/HDFS-7285
Commit: da733dc2afca7065eb0cad27076806aeaacca914
Parents: 534bb0c
Author: Jing Zhao ji...@apache.org
Authored: Mon Mar 23 15:10:10 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:04 2015 -0700

--
 .../blockmanagement/BlockInfoStriped.java   |  11 +-
 .../hdfs/server/namenode/FSImageFormat.java |  62 ++--
 .../server/namenode/FSImageSerialization.java   |  78 +++---
 .../blockmanagement/TestBlockInfoStriped.java   |  34 +
 .../hdfs/server/namenode/TestFSImage.java   | 148 ++-
 5 files changed, 300 insertions(+), 33 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/da733dc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
index cef8318..30b5ee7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoStriped.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.BlockUCState;
+import java.io.DataOutput;
+import java.io.IOException;
 
 /**
  * Subclass of {@link BlockInfo}, presenting a block group in erasure coding.
@@ -206,6 +208,13 @@ public class BlockInfoStriped extends BlockInfo {
 return num;
   }
 
+  @Override
+  public void write(DataOutput out) throws IOException {
+out.writeShort(dataBlockNum);
+out.writeShort(parityBlockNum);
+super.write(out);
+  }
+
   /**
* Convert a complete block to an under construction block.
* @return BlockInfoUnderConstruction -  an under construction block.
@@ -215,7 +224,7 @@ public class BlockInfoStriped extends BlockInfo {
 final BlockInfoStripedUnderConstruction ucBlock;
 if(isComplete()) {
   ucBlock = new BlockInfoStripedUnderConstruction(this, getDataBlockNum(),
-  getParityBlockNum(),  s, targets);
+  getParityBlockNum(), s, targets);
   ucBlock.setBlockCollection(getBlockCollection());
 } else {
   // the block is already under construction

http://git-wip-us.apache.org/repos/asf/hadoop/blob/da733dc2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
index 2e6e741..ad96863 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormat.java
@@ -47,13 +47,16 @@ import org.apache.hadoop.fs.PathIsNotDirectoryException;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutFlags;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguous;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStriped;
 import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoContiguousUnderConstruction;
+import 
org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoStripedUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
@@ 

[11/50] hadoop git commit: HDFS-7936. Erasure coding: resolving conflicts in the branch when merging (this commit is for HDFS-7742)

2015-04-20 Thread zhz
HDFS-7936. Erasure coding: resolving conflicts in the branch when merging (this 
commit is for HDFS-7742)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6a2a5e78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6a2a5e78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6a2a5e78

Branch: refs/heads/HDFS-7285
Commit: 6a2a5e78a116fc7ca1458d916dcb8c924f9d61eb
Parents: c58b611
Author: Zhe Zhang z...@apache.org
Authored: Mon Mar 30 10:23:09 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:05 2015 -0700

--
 .../hdfs/server/blockmanagement/TestBlockManager.java   | 12 ++--
 1 file changed, 6 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6a2a5e78/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index cbea3d8..43f4607 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -552,11 +552,11 @@ public class TestBlockManager {
 assertNotNull(Chooses decommissioning source node for a normal 
replication
 +  if all available source nodes have reached their replication
 +  limits below the hard limit.,
-bm.chooseSourceDatanode(
-aBlock,
+bm.chooseSourceDatanodes(
+bm.getStoredBlock(aBlock),
 cntNodes,
 liveNodes,
-new NumberReplicas(),
+new NumberReplicas(), new LinkedListShort(), 1,
 UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
 
 
@@ -566,11 +566,11 @@ public class TestBlockManager {
 
 assertNull("Does not choose a source decommissioning node for a normal"
 + " replication when all available nodes exceed the hard limit.",
-bm.chooseSourceDatanode(
-aBlock,
+bm.chooseSourceDatanodes(
+bm.getStoredBlock(aBlock),
 cntNodes,
 liveNodes,
-new NumberReplicas(),
+new NumberReplicas(), new LinkedList<Short>(), 1,
 UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED));
   }
 



[21/50] hadoop git commit: Updated CHANGES-HDFS-EC-7285.txt

2015-04-20 Thread zhz
Updated CHANGES-HDFS-EC-7285.txt


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5eef7938
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5eef7938
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5eef7938

Branch: refs/heads/HDFS-7285
Commit: 5eef7938cf81a7719c8ffa1e08764233566ae175
Parents: da45676
Author: Kai Zheng kai.zh...@intel.com
Authored: Wed Apr 8 01:31:46 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:08 2015 -0700

--
 hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt | 3 +++
 1 file changed, 3 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5eef7938/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index 01280db..68d1d32 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -32,3 +32,6 @@
 
 HADOOP-11782 Correct two thrown messages in ECSchema class. Contributed by 
Xinwei Qin
 ( Xinwei Qin via Kai Zheng )
+
+HADOOP-11805 Better to rename some raw erasure coders. Contributed by Kai 
Zheng
+( Kai Zheng )



[19/50] hadoop git commit: HDFS-7969. Erasure coding: NameNode support for lease recovery of striped block groups. Contributed by Zhe Zhang.

2015-04-20 Thread zhz
HDFS-7969. Erasure coding: NameNode support for lease recovery of striped block 
groups. Contributed by Zhe Zhang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68a15991
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68a15991
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68a15991

Branch: refs/heads/HDFS-7285
Commit: 68a1599183db0cbc122d3beea8a86bc25805159e
Parents: 9e0f3de
Author: Zhe Zhang z...@apache.org
Authored: Mon Apr 6 12:52:44 2015 -0700
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:08 2015 -0700

--
 .../BlockInfoContiguousUnderConstruction.java   | 33 
 .../BlockInfoStripedUnderConstruction.java  | 80 
 .../BlockInfoUnderConstruction.java | 57 ++
 .../blockmanagement/DatanodeDescriptor.java | 12 +--
 .../server/blockmanagement/DatanodeManager.java | 10 +--
 .../hdfs/server/namenode/FSNamesystem.java  | 24 +++---
 .../TestBlockInfoUnderConstruction.java |  2 +-
 7 files changed, 163 insertions(+), 55 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68a15991/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
index 7a052fd..9ba2978 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockInfoContiguousUnderConstruction.java
@@ -31,7 +31,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
  * Represents a block that is currently being constructed.<br>
  * This is usually the last block of a file opened for write or append.
  */
-public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous {
+public class BlockInfoContiguousUnderConstruction extends BlockInfoContiguous
+implements BlockInfoUnderConstruction{
   /** Block state. See {@link BlockUCState} */
   private BlockUCState blockUCState;
 
@@ -94,7 +95,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 return new BlockInfoContiguous(this);
   }
 
-  /** Set expected locations */
+  @Override
   public void setExpectedLocations(DatanodeStorageInfo[] targets) {
 int numLocations = targets == null ? 0 : targets.length;
 this.replicas = new ArrayList<ReplicaUnderConstruction>(numLocations);
@@ -104,10 +105,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 }
   }
 
-  /**
-   * Create array of expected replica locations
-   * (as has been assigned by chooseTargets()).
-   */
+  @Override
   public DatanodeStorageInfo[] getExpectedStorageLocations() {
 int numLocations = replicas == null ? 0 : replicas.size();
 DatanodeStorageInfo[] storages = new DatanodeStorageInfo[numLocations];
@@ -117,7 +115,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 return storages;
   }
 
-  /** Get the number of expected locations */
+  @Override
   public int getNumExpectedLocations() {
 return replicas == null ? 0 : replicas.size();
   }
@@ -135,25 +133,26 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 blockUCState = s;
   }
 
-  /** Get block recovery ID */
+  @Override
   public long getBlockRecoveryId() {
 return blockRecoveryId;
   }
 
-  /** Get recover block */
+  @Override
   public Block getTruncateBlock() {
 return truncateBlock;
   }
 
+  @Override
+  public Block toBlock(){
+return this;
+  }
+
   public void setTruncateBlock(Block recoveryBlock) {
 this.truncateBlock = recoveryBlock;
   }
 
-  /**
-   * Process the recorded replicas. When about to commit or finish the
-   * pipeline recovery sort out bad replicas.
-   * @param genStamp  The final generation stamp for the block.
-   */
+  @Override
   public void setGenerationStampAndVerifyReplicas(long genStamp) {
 // Set the generation stamp for the block.
 setGenerationStamp(genStamp);
@@ -187,11 +186,7 @@ public class BlockInfoContiguousUnderConstruction extends 
BlockInfoContiguous {
 setGenerationStampAndVerifyReplicas(block.getGenerationStamp());
   }
 
-  /**
-   * Initialize lease recovery for this block.
-   * Find the first alive data-node starting from the previous primary and
-   * make it primary.
-   */
+  @Override
   

[03/50] hadoop git commit: HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng

2015-04-20 Thread zhz
HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/916a1ea1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/916a1ea1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/916a1ea1

Branch: refs/heads/HDFS-7285
Commit: 916a1ea1ac462286e73bd64dc9ae3bdae2f2bbc8
Parents: 757c3c2
Author: Kai Zheng kai.zh...@intel.com
Authored: Fri Mar 20 19:15:52 2015 +0800
Committer: Zhe Zhang z...@apache.org
Committed: Mon Apr 20 10:22:03 2015 -0700

--
 .../hadoop-common/CHANGES-HDFS-EC-7285.txt  |  3 +
 .../hadoop/fs/CommonConfigurationKeys.java  | 15 
 .../erasurecode/coder/AbstractErasureCoder.java | 65 ++
 .../coder/AbstractErasureDecoder.java   |  6 +-
 .../coder/AbstractErasureEncoder.java   |  6 +-
 .../io/erasurecode/coder/RSErasureDecoder.java  | 83 ++
 .../io/erasurecode/coder/RSErasureEncoder.java  | 47 ++
 .../io/erasurecode/coder/XorErasureDecoder.java |  2 +-
 .../io/erasurecode/coder/XorErasureEncoder.java |  2 +-
 .../erasurecode/coder/TestRSErasureCoder.java   | 92 
 10 files changed, 315 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/916a1ea1/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
--
diff --git a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt 
b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
index f566f0e..b69e69a 100644
--- a/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
+++ b/hadoop-common-project/hadoop-common/CHANGES-HDFS-EC-7285.txt
@@ -26,3 +26,6 @@
 
 HADOOP-11707. Add factory to create raw erasure coder. Contributed by Kai 
Zheng
 ( Kai Zheng )
+
+HADOOP-11647. Reed-Solomon ErasureCoder. Contributed by Kai Zheng
+( Kai Zheng )

http://git-wip-us.apache.org/repos/asf/hadoop/blob/916a1ea1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
index 7575496..70fea01 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
@@ -135,6 +135,21 @@ public class CommonConfigurationKeys extends 
CommonConfigurationKeysPublic {
   false;
 
   /**
+   * Erasure Coding configuration family
+   */
+
+  /** Supported erasure codec classes */
+  public static final String IO_ERASURECODE_CODECS_KEY = 
"io.erasurecode.codecs";
+
+  /** Use XOR raw coder when possible for the RS codec */
+  public static final String IO_ERASURECODE_CODEC_RS_USEXOR_KEY =
+  "io.erasurecode.codec.rs.usexor";
+
+  /** Raw coder factory for the RS codec */
+  public static final String IO_ERASURECODE_CODEC_RS_RAWCODER_KEY =
+  "io.erasurecode.codec.rs.rawcoder";
+
+  /**
* Service Authorization
*/
   public static final String 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/916a1ea1/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
index 8d3bc34..0e4de89 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/coder/AbstractErasureCoder.java
@@ -17,7 +17,12 @@
  */
 package org.apache.hadoop.io.erasurecode.coder;
 
+import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureCoderFactory;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureDecoder;
+import org.apache.hadoop.io.erasurecode.rawcoder.RawErasureEncoder;
 
 /**
  * A common class of basic facilities to be shared by encoder and decoder
@@ -31,6 +36,66 @@ public abstract class AbstractErasureCoder
   private int numParityUnits;
   private int chunkSize;
 
+  /**
+   * Create raw decoder using the factory specified by 

[Hadoop Wiki] Update of LibHDFS by SteveLoughran

2015-04-20 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on Hadoop Wiki for change 
notification.

The LibHDFS page has been changed by SteveLoughran:
https://wiki.apache.org/hadoop/LibHDFS?action=diff&rev1=11&rev2=12

Comment:
time to remove the @lucene email ref!

  <<Anchor(Contact)>>
  = Contact Information =
  
-  Please drop us an email at '''hadoop-us...@lucene.apache.org''' if you have 
any questions or any suggestions. Use 
[[http://issues.apache.org/jira/browse/HADOOP|Jira]] (component: dfs) to report 
bugs.
+  Please drop us an email at '''us...@hadoop.apache.org''' if you have any 
questions or any suggestions. Use 
[[http://issues.apache.org/jira/browse/HADOOP|Jira]] (component: hdfs) to 
report bugs.
  
  <<BR>>
  <<Anchor(Conclusion)>>