[09/50] hadoop git commit: HDFS-8005. Erasure Coding: simplify striped block recovery work computation and add tests. Contributed by Jing Zhao.

2015-04-20 Thread zhz
HDFS-8005. Erasure Coding: simplify striped block recovery work computation and 
add tests. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/232fae55
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/232fae55
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/232fae55

Branch: refs/heads/HDFS-7285
Commit: 232fae552e736358f698cf982b9682c3b6b927f4
Parents: ba93dd7
Author: Jing Zhao 
Authored: Mon Mar 30 13:35:36 2015 -0700
Committer: Zhe Zhang 
Committed: Mon Apr 20 10:22:05 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 134 +---
 .../blockmanagement/DatanodeDescriptor.java |  14 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../blockmanagement/TestBlockManager.java   |  33 +--
 .../TestRecoverStripedBlocks.java   | 107 --
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../namenode/TestRecoverStripedBlocks.java  | 210 +++
 7 files changed, 290 insertions(+), 211 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/232fae55/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0af2ce9..48ddab5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -540,7 +540,7 @@ public class BlockManager {
 // source node returned is not used
 chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-new LinkedList<Short>(), 1, UnderReplicatedBlocks.LEVEL);
+new LinkedList<Short>(), UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1382,7 +1382,7 @@ public class BlockManager {
  int computeRecoveryWorkForBlocks(List<List<Block>> blocksToRecover) {
int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes;
-BlockCollection bc = null;
+BlockCollection bc;
 int additionalReplRequired;
 
 int scheduledWork = 0;
@@ -1410,13 +1410,10 @@ public class BlockManager {
 containingNodes = new ArrayList<>();
List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
 NumberReplicas numReplicas = new NumberReplicas();
-List<Short> missingBlockIndices = new LinkedList<>();
-DatanodeDescriptor[] srcNodes;
-int numSourceNodes = bc.isStriped() ?
-HdfsConstants.NUM_DATA_BLOCKS : 1;
-srcNodes = chooseSourceDatanodes(
-block, containingNodes, liveReplicaNodes, numReplicas,
-missingBlockIndices, numSourceNodes, priority);
+List<Short> liveBlockIndices = new ArrayList<>();
+final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
+containingNodes, liveReplicaNodes, numReplicas,
+liveBlockIndices, priority);
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be replicated from any node
   LOG.debug("Block " + block + " cannot be recovered " +
@@ -1448,15 +1445,14 @@ public class BlockManager {
 } else {
   additionalReplRequired = 1; // Needed on a new rack
 }
-if (bc.isStriped()) {
+if (block.isStriped()) {
+  short[] indices = new short[liveBlockIndices.size()];
+  for (int i = 0 ; i < liveBlockIndices.size(); i++) {
+indices[i] = liveBlockIndices.get(i);
+  }
   ErasureCodingWork ecw = new ErasureCodingWork(block, bc, 
srcNodes,
   containingNodes, liveReplicaNodes, additionalReplRequired,
-  priority);
-  short[] missingBlockArray = new 
short[missingBlockIndices.size()];
-  for (int i = 0 ; i < missingBlockIndices.size(); i++) {
-missingBlockArray[i] = missingBlockIndices.get(i);
-  }
-  ecw.setMissingBlockIndices(missingBlockArray);
+  priority, indices);
   recovWork.add(ecw);
 } else {
   recovWork.add(new ReplicationWork(block, bc, srcNodes,
@@ -1536,15 +1532,14 @@ public class BlockManager {
   }
 
   // 

[24/50] [abbrv] hadoop git commit: HDFS-8005. Erasure Coding: simplify striped block recovery work computation and add tests. Contributed by Jing Zhao.

2015-04-13 Thread zhz
HDFS-8005. Erasure Coding: simplify striped block recovery work computation and 
add tests. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e05166c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e05166c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e05166c6

Branch: refs/heads/HDFS-7285
Commit: e05166c6be6cd89b0260fb33f9680f64ff00e0ac
Parents: f10ef25
Author: Jing Zhao 
Authored: Mon Mar 30 13:35:36 2015 -0700
Committer: Zhe Zhang 
Committed: Mon Apr 13 13:09:54 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 134 +---
 .../blockmanagement/DatanodeDescriptor.java |  14 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../blockmanagement/TestBlockManager.java   |  33 +--
 .../TestRecoverStripedBlocks.java   | 107 --
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../namenode/TestRecoverStripedBlocks.java  | 210 +++
 7 files changed, 290 insertions(+), 211 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e05166c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 47865ec..78ada82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -539,7 +539,7 @@ public class BlockManager {
 // source node returned is not used
 chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-new LinkedList<Short>(), 1, UnderReplicatedBlocks.LEVEL);
+new LinkedList<Short>(), UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1376,7 +1376,7 @@ public class BlockManager {
  int computeRecoveryWorkForBlocks(List<List<Block>> blocksToRecover) {
int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes;
-BlockCollection bc = null;
+BlockCollection bc;
 int additionalReplRequired;
 
 int scheduledWork = 0;
@@ -1404,13 +1404,10 @@ public class BlockManager {
 containingNodes = new ArrayList<>();
List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
 NumberReplicas numReplicas = new NumberReplicas();
-List<Short> missingBlockIndices = new LinkedList<>();
-DatanodeDescriptor[] srcNodes;
-int numSourceNodes = bc.isStriped() ?
-HdfsConstants.NUM_DATA_BLOCKS : 1;
-srcNodes = chooseSourceDatanodes(
-block, containingNodes, liveReplicaNodes, numReplicas,
-missingBlockIndices, numSourceNodes, priority);
+List<Short> liveBlockIndices = new ArrayList<>();
+final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
+containingNodes, liveReplicaNodes, numReplicas,
+liveBlockIndices, priority);
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be replicated from any node
   LOG.debug("Block " + block + " cannot be recovered " +
@@ -1442,15 +1439,14 @@ public class BlockManager {
 } else {
   additionalReplRequired = 1; // Needed on a new rack
 }
-if (bc.isStriped()) {
+if (block.isStriped()) {
+  short[] indices = new short[liveBlockIndices.size()];
+  for (int i = 0 ; i < liveBlockIndices.size(); i++) {
+indices[i] = liveBlockIndices.get(i);
+  }
   ErasureCodingWork ecw = new ErasureCodingWork(block, bc, 
srcNodes,
   containingNodes, liveReplicaNodes, additionalReplRequired,
-  priority);
-  short[] missingBlockArray = new 
short[missingBlockIndices.size()];
-  for (int i = 0 ; i < missingBlockIndices.size(); i++) {
-missingBlockArray[i] = missingBlockIndices.get(i);
-  }
-  ecw.setMissingBlockIndices(missingBlockArray);
+  priority, indices);
   recovWork.add(ecw);
 } else {
   recovWork.add(new ReplicationWork(block, bc, srcNodes,
@@ -1530,15 +1526,14 @@ public class BlockManager {
   }
 
   // 

[27/50] [abbrv] hadoop git commit: HDFS-8005. Erasure Coding: simplify striped block recovery work computation and add tests. Contributed by Jing Zhao.

2015-04-13 Thread zhz
HDFS-8005. Erasure Coding: simplify striped block recovery work computation and 
add tests. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2c14bbe2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2c14bbe2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2c14bbe2

Branch: refs/heads/HDFS-7285
Commit: 2c14bbe2692b6b9e203cfd6be7501c651ef8a07f
Parents: 2e39c2b
Author: Jing Zhao 
Authored: Mon Mar 30 13:35:36 2015 -0700
Committer: Zhe Zhang 
Committed: Mon Apr 13 09:48:51 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 134 +---
 .../blockmanagement/DatanodeDescriptor.java |  14 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../blockmanagement/TestBlockManager.java   |  33 +--
 .../TestRecoverStripedBlocks.java   | 107 --
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../namenode/TestRecoverStripedBlocks.java  | 210 +++
 7 files changed, 290 insertions(+), 211 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2c14bbe2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 47865ec..78ada82 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -539,7 +539,7 @@ public class BlockManager {
 // source node returned is not used
 chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-new LinkedList<Short>(), 1, UnderReplicatedBlocks.LEVEL);
+new LinkedList<Short>(), UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1376,7 +1376,7 @@ public class BlockManager {
  int computeRecoveryWorkForBlocks(List<List<Block>> blocksToRecover) {
int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes;
-BlockCollection bc = null;
+BlockCollection bc;
 int additionalReplRequired;
 
 int scheduledWork = 0;
@@ -1404,13 +1404,10 @@ public class BlockManager {
 containingNodes = new ArrayList<>();
List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
 NumberReplicas numReplicas = new NumberReplicas();
-List<Short> missingBlockIndices = new LinkedList<>();
-DatanodeDescriptor[] srcNodes;
-int numSourceNodes = bc.isStriped() ?
-HdfsConstants.NUM_DATA_BLOCKS : 1;
-srcNodes = chooseSourceDatanodes(
-block, containingNodes, liveReplicaNodes, numReplicas,
-missingBlockIndices, numSourceNodes, priority);
+List<Short> liveBlockIndices = new ArrayList<>();
+final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
+containingNodes, liveReplicaNodes, numReplicas,
+liveBlockIndices, priority);
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be replicated from any node
   LOG.debug("Block " + block + " cannot be recovered " +
@@ -1442,15 +1439,14 @@ public class BlockManager {
 } else {
   additionalReplRequired = 1; // Needed on a new rack
 }
-if (bc.isStriped()) {
+if (block.isStriped()) {
+  short[] indices = new short[liveBlockIndices.size()];
+  for (int i = 0 ; i < liveBlockIndices.size(); i++) {
+indices[i] = liveBlockIndices.get(i);
+  }
   ErasureCodingWork ecw = new ErasureCodingWork(block, bc, 
srcNodes,
   containingNodes, liveReplicaNodes, additionalReplRequired,
-  priority);
-  short[] missingBlockArray = new 
short[missingBlockIndices.size()];
-  for (int i = 0 ; i < missingBlockIndices.size(); i++) {
-missingBlockArray[i] = missingBlockIndices.get(i);
-  }
-  ecw.setMissingBlockIndices(missingBlockArray);
+  priority, indices);
   recovWork.add(ecw);
 } else {
   recovWork.add(new ReplicationWork(block, bc, srcNodes,
@@ -1530,15 +1526,14 @@ public class BlockManager {
   }
 
   // 

[43/51] [abbrv] hadoop git commit: HDFS-8005. Erasure Coding: simplify striped block recovery work computation and add tests. Contributed by Jing Zhao.

2015-04-06 Thread zhz
HDFS-8005. Erasure Coding: simplify striped block recovery work computation and 
add tests. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62d49a84
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62d49a84
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62d49a84

Branch: refs/heads/HDFS-7285
Commit: 62d49a84d13a3ff7f63281a318a1940acee64743
Parents: 0767bd7
Author: Jing Zhao 
Authored: Mon Mar 30 13:35:36 2015 -0700
Committer: Zhe Zhang 
Committed: Mon Apr 6 10:21:03 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 138 +---
 .../blockmanagement/DatanodeDescriptor.java |  14 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../blockmanagement/TestBlockManager.java   |  33 +--
 .../TestRecoverStripedBlocks.java   | 107 --
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../namenode/TestRecoverStripedBlocks.java  | 210 +++
 7 files changed, 292 insertions(+), 213 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62d49a84/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e05330c..f42adcd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -538,7 +538,7 @@ public class BlockManager {
 // source node returned is not used
 chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-new LinkedList<Short>(), 1, UnderReplicatedBlocks.LEVEL);
+new LinkedList<Short>(), UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1376,7 +1376,7 @@ public class BlockManager {
  int computeRecoveryWorkForBlocks(List<List<Block>> blocksToRecover) {
int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes;
-BlockCollection bc = null;
+BlockCollection bc;
 int additionalReplRequired;
 
 int scheduledWork = 0;
@@ -1404,13 +1404,10 @@ public class BlockManager {
 containingNodes = new ArrayList<>();
List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
 NumberReplicas numReplicas = new NumberReplicas();
-List<Short> missingBlockIndices = new LinkedList<>();
-DatanodeDescriptor[] srcNodes;
-int numSourceNodes = bc.isStriped() ?
-HdfsConstants.NUM_DATA_BLOCKS : 1;
-srcNodes = chooseSourceDatanodes(
-block, containingNodes, liveReplicaNodes, numReplicas,
-missingBlockIndices, numSourceNodes, priority);
+List<Short> liveBlockIndices = new ArrayList<>();
+final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
+containingNodes, liveReplicaNodes, numReplicas,
+liveBlockIndices, priority);
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be replicated from any node
   LOG.debug("Block " + block + " cannot be recovered " +
@@ -1442,15 +1439,14 @@ public class BlockManager {
 } else {
   additionalReplRequired = 1; // Needed on a new rack
 }
-if (bc.isStriped()) {
+if (block.isStriped()) {
+  short[] indices = new short[liveBlockIndices.size()];
+  for (int i = 0 ; i < liveBlockIndices.size(); i++) {
+indices[i] = liveBlockIndices.get(i);
+  }
   ErasureCodingWork ecw = new ErasureCodingWork(block, bc, 
srcNodes,
   containingNodes, liveReplicaNodes, additionalReplRequired,
-  priority);
-  short[] missingBlockArray = new 
short[missingBlockIndices.size()];
-  for (int i = 0 ; i < missingBlockIndices.size(); i++) {
-missingBlockArray[i] = missingBlockIndices.get(i);
-  }
-  ecw.setMissingBlockIndices(missingBlockArray);
+  priority, indices);
   recovWork.add(ecw);
 } else {
   recovWork.add(new ReplicationWork(block, bc, srcNodes,
@@ -1530,15 +1526,14 @@ public class BlockManager {
   }
 
   // A

[47/50] [abbrv] hadoop git commit: HDFS-8005. Erasure Coding: simplify striped block recovery work computation and add tests. Contributed by Jing Zhao.

2015-04-02 Thread zhz
HDFS-8005. Erasure Coding: simplify striped block recovery work computation and 
add tests. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2070431c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2070431c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2070431c

Branch: refs/heads/HDFS-7285
Commit: 2070431cee6e992bf2a8c7ad0fe39d6b47c61780
Parents: 019d211
Author: Jing Zhao 
Authored: Mon Mar 30 13:35:36 2015 -0700
Committer: Zhe Zhang 
Committed: Thu Apr 2 11:06:11 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 138 +---
 .../blockmanagement/DatanodeDescriptor.java |  14 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../blockmanagement/TestBlockManager.java   |  33 +--
 .../TestRecoverStripedBlocks.java   | 107 --
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../namenode/TestRecoverStripedBlocks.java  | 210 +++
 7 files changed, 292 insertions(+), 213 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2070431c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5aed4d7..23f0699 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -538,7 +538,7 @@ public class BlockManager {
 // source node returned is not used
 chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-new LinkedList<Short>(), 1, UnderReplicatedBlocks.LEVEL);
+new LinkedList<Short>(), UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1376,7 +1376,7 @@ public class BlockManager {
  int computeRecoveryWorkForBlocks(List<List<Block>> blocksToRecover) {
int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes;
-BlockCollection bc = null;
+BlockCollection bc;
 int additionalReplRequired;
 
 int scheduledWork = 0;
@@ -1404,13 +1404,10 @@ public class BlockManager {
 containingNodes = new ArrayList<>();
List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
 NumberReplicas numReplicas = new NumberReplicas();
-List<Short> missingBlockIndices = new LinkedList<>();
-DatanodeDescriptor[] srcNodes;
-int numSourceNodes = bc.isStriped() ?
-HdfsConstants.NUM_DATA_BLOCKS : 1;
-srcNodes = chooseSourceDatanodes(
-block, containingNodes, liveReplicaNodes, numReplicas,
-missingBlockIndices, numSourceNodes, priority);
+List<Short> liveBlockIndices = new ArrayList<>();
+final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
+containingNodes, liveReplicaNodes, numReplicas,
+liveBlockIndices, priority);
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be replicated from any node
   LOG.debug("Block " + block + " cannot be recovered " +
@@ -1442,15 +1439,14 @@ public class BlockManager {
 } else {
   additionalReplRequired = 1; // Needed on a new rack
 }
-if (bc.isStriped()) {
+if (block.isStriped()) {
+  short[] indices = new short[liveBlockIndices.size()];
+  for (int i = 0 ; i < liveBlockIndices.size(); i++) {
+indices[i] = liveBlockIndices.get(i);
+  }
   ErasureCodingWork ecw = new ErasureCodingWork(block, bc, 
srcNodes,
   containingNodes, liveReplicaNodes, additionalReplRequired,
-  priority);
-  short[] missingBlockArray = new 
short[missingBlockIndices.size()];
-  for (int i = 0 ; i < missingBlockIndices.size(); i++) {
-missingBlockArray[i] = missingBlockIndices.get(i);
-  }
-  ecw.setMissingBlockIndices(missingBlockArray);
+  priority, indices);
   recovWork.add(ecw);
 } else {
   recovWork.add(new ReplicationWork(block, bc, srcNodes,
@@ -1530,15 +1526,14 @@ public class BlockManager {
   }
 
   // A

hadoop git commit: HDFS-8005. Erasure Coding: simplify striped block recovery work computation and add tests. Contributed by Jing Zhao.

2015-03-30 Thread jing9
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7285 a1075153e -> 5ef6204c0


HDFS-8005. Erasure Coding: simplify striped block recovery work computation and 
add tests. Contributed by Jing Zhao.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ef6204c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ef6204c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ef6204c

Branch: refs/heads/HDFS-7285
Commit: 5ef6204c01f96be6d6c93cf797330dc6eaaeac65
Parents: a107515
Author: Jing Zhao 
Authored: Mon Mar 30 13:35:36 2015 -0700
Committer: Jing Zhao 
Committed: Mon Mar 30 13:35:36 2015 -0700

--
 .../server/blockmanagement/BlockManager.java| 138 +---
 .../blockmanagement/DatanodeDescriptor.java |  14 +-
 .../hadoop/hdfs/server/namenode/INodeFile.java  |   1 +
 .../blockmanagement/TestBlockManager.java   |  33 +--
 .../TestRecoverStripedBlocks.java   | 107 --
 .../server/namenode/TestAddStripedBlocks.java   |   2 +-
 .../namenode/TestRecoverStripedBlocks.java  | 210 +++
 7 files changed, 292 insertions(+), 213 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ef6204c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 7e8a88c..063b396 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -538,7 +538,7 @@ public class BlockManager {
 // source node returned is not used
 chooseSourceDatanodes(getStoredBlock(block), containingNodes,
 containingLiveReplicasNodes, numReplicas,
-new LinkedList<Short>(), 1, UnderReplicatedBlocks.LEVEL);
+new LinkedList<Short>(), UnderReplicatedBlocks.LEVEL);
 
 // containingLiveReplicasNodes can include READ_ONLY_SHARED replicas which 
are 
 // not included in the numReplicas.liveReplicas() count
@@ -1376,7 +1376,7 @@ public class BlockManager {
  int computeRecoveryWorkForBlocks(List<List<Block>> blocksToRecover) {
int requiredReplication, numEffectiveReplicas;
List<DatanodeDescriptor> containingNodes;
-BlockCollection bc = null;
+BlockCollection bc;
 int additionalReplRequired;
 
 int scheduledWork = 0;
@@ -1404,13 +1404,10 @@ public class BlockManager {
 containingNodes = new ArrayList<>();
List<DatanodeStorageInfo> liveReplicaNodes = new ArrayList<>();
 NumberReplicas numReplicas = new NumberReplicas();
-List<Short> missingBlockIndices = new LinkedList<>();
-DatanodeDescriptor[] srcNodes;
-int numSourceNodes = bc.isStriped() ?
-HdfsConstants.NUM_DATA_BLOCKS : 1;
-srcNodes = chooseSourceDatanodes(
-block, containingNodes, liveReplicaNodes, numReplicas,
-missingBlockIndices, numSourceNodes, priority);
+List<Short> liveBlockIndices = new ArrayList<>();
+final DatanodeDescriptor[] srcNodes = chooseSourceDatanodes(block,
+containingNodes, liveReplicaNodes, numReplicas,
+liveBlockIndices, priority);
 if(srcNodes == null || srcNodes.length == 0) {
   // block can not be replicated from any node
   LOG.debug("Block " + block + " cannot be recovered " +
@@ -1442,15 +1439,14 @@ public class BlockManager {
 } else {
   additionalReplRequired = 1; // Needed on a new rack
 }
-if (bc.isStriped()) {
+if (block.isStriped()) {
+  short[] indices = new short[liveBlockIndices.size()];
+  for (int i = 0 ; i < liveBlockIndices.size(); i++) {
+indices[i] = liveBlockIndices.get(i);
+  }
   ErasureCodingWork ecw = new ErasureCodingWork(block, bc, 
srcNodes,
   containingNodes, liveReplicaNodes, additionalReplRequired,
-  priority);
-  short[] missingBlockArray = new 
short[missingBlockIndices.size()];
-  for (int i = 0 ; i < missingBlockIndices.size(); i++) {
-missingBlockArray[i] = missingBlockIndices.get(i);
-  }
-  ecw.setMissingBlockIndices(missingBlockArray);
+  priority, indices);
   recovWork.add(ecw);
 } else {
   recovWork.add(new ReplicationWork(block, bc, srcNod