hadoop git commit: HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be easily overrided. (Contributed by Walter Su)

2015-04-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/trunk 4be648b55 -> d505c8acd


HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be 
easily overrided. (Contributed by Walter Su)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d505c8ac
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d505c8ac
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d505c8ac

Branch: refs/heads/trunk
Commit: d505c8acd30d6f40d0632fe9c93c886a4499a9fc
Parents: 4be648b
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 09:56:37 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Apr 8 09:56:37 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../BlockPlacementPolicyDefault.java| 87 
 2 files changed, 54 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d505c8ac/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index f194bd7..ac508cb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -385,6 +385,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to
 HdfsClientConfigKeys.Retry.  (szetszwo)
 
+HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it
+can be easily overrided. (Walter Su via vinayakumarb)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d505c8ac/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 3262772..09db986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -333,41 +333,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 +  " unavailableStorages=" + unavailableStorages
 +  ", storagePolicy=" + storagePolicy);
   }
-
-  if (numOfResults == 0) {
-writer = chooseLocalStorage(writer, excludedNodes, blocksize,
-maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
-.getDatanodeDescriptor();
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor();
-  if (numOfResults >= 1) {
-chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
-results, avoidStaleNodes, storageTypes);
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  if (numOfResults >= 2) {
-final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor();
-if (clusterMap.isOnSameRack(dn0, dn1)) {
-  chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-} else if (newBlock){
-  chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-} else {
-  chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-}
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
-  maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+  writer = chooseTargetInOrder(numOfReplicas, writer, excludedNodes, 
blocksize,
+  maxNodesPerRack, results, avoidStaleNodes, newBlock, storageTypes);
 } catch (NotEnoughReplicasException e) {
   final String message = Failed to place enough replicas, still in need 
of 
   + (totalReplicasExpected - results.size()) +  to reach 
@@ -422,7 +389,55 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 }
 return writer;
   }
-
+
+  protected Node chooseTargetInOrder(int numOfReplicas, 
+ Node writer,
+ 

hadoop git commit: HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be easily overrided. (Contributed by Walter Su)

2015-04-07 Thread vinayakumarb
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 eb3ca514f -> 3cf7ac181


HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it can be 
easily overrided. (Contributed by Walter Su)

(cherry picked from commit d505c8acd30d6f40d0632fe9c93c886a4499a9fc)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3cf7ac18
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3cf7ac18
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3cf7ac18

Branch: refs/heads/branch-2
Commit: 3cf7ac181ba9c6a31d4452d05d796901c5ed5517
Parents: eb3ca51
Author: Vinayakumar B vinayakum...@apache.org
Authored: Wed Apr 8 09:56:37 2015 +0530
Committer: Vinayakumar B vinayakum...@apache.org
Committed: Wed Apr 8 09:57:49 2015 +0530

--
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt |  3 +
 .../BlockPlacementPolicyDefault.java| 87 
 2 files changed, 54 insertions(+), 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf7ac18/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 687d7d5..e969644 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -70,6 +70,9 @@ Release 2.8.0 - UNRELEASED
 HDFS-8079. Move dfs.client.retry.* confs from DFSConfigKeys to
 HdfsClientConfigKeys.Retry.  (szetszwo)
 
+HDFS-8073. Split BlockPlacementPolicyDefault.chooseTarget(..) so it
+can be easily overrided. (Walter Su via vinayakumarb)
+
   OPTIMIZATIONS
 
 HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3cf7ac18/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 3262772..09db986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -333,41 +333,8 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 +  " unavailableStorages=" + unavailableStorages
 +  ", storagePolicy=" + storagePolicy);
   }
-
-  if (numOfResults == 0) {
-writer = chooseLocalStorage(writer, excludedNodes, blocksize,
-maxNodesPerRack, results, avoidStaleNodes, storageTypes, true)
-.getDatanodeDescriptor();
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  final DatanodeDescriptor dn0 = results.get(0).getDatanodeDescriptor();
-  if (numOfResults >= 1) {
-chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
-results, avoidStaleNodes, storageTypes);
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  if (numOfResults >= 2) {
-final DatanodeDescriptor dn1 = results.get(1).getDatanodeDescriptor();
-if (clusterMap.isOnSameRack(dn0, dn1)) {
-  chooseRemoteRack(1, dn0, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-} else if (newBlock){
-  chooseLocalRack(dn1, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-} else {
-  chooseLocalRack(writer, excludedNodes, blocksize, maxNodesPerRack,
-  results, avoidStaleNodes, storageTypes);
-}
-if (--numOfReplicas == 0) {
-  return writer;
-}
-  }
-  chooseRandom(numOfReplicas, NodeBase.ROOT, excludedNodes, blocksize,
-  maxNodesPerRack, results, avoidStaleNodes, storageTypes);
+  writer = chooseTargetInOrder(numOfReplicas, writer, excludedNodes, 
blocksize,
+  maxNodesPerRack, results, avoidStaleNodes, newBlock, storageTypes);
 } catch (NotEnoughReplicasException e) {
   final String message = "Failed to place enough replicas, still in need of "
   + (totalReplicasExpected - results.size()) + " to reach "
@@ -422,7 +389,55 @@ public class BlockPlacementPolicyDefault extends 
BlockPlacementPolicy {
 }
 return writer;
   }
-
+
+  protected Node chooseTargetInOrder(int numOfReplicas, 
+