HDFS-9393. After choosing favored nodes, choosing nodes for remaining replicas should go through BlockPlacementPolicy (Contributed by J.Andreina)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bfadf11b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bfadf11b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bfadf11b

Branch: refs/heads/yarn-2877
Commit: bfadf11b36e9d97e03d6ed1e71829907c2301412
Parents: 061c05c
Author: Vinayakumar B <[email protected]>
Authored: Fri Dec 18 11:38:12 2015 +0530
Committer: Vinayakumar B <[email protected]>
Committed: Fri Dec 18 11:38:12 2015 +0530

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt      |  4 ++
 .../BlockPlacementPolicyDefault.java             | 15 ++++--
 .../blockmanagement/TestReplicationPolicy.java   | 40 ++++++++++++++++++++
 .../TestReplicationPolicyWithNodeGroup.java      | 32 ++++++++++++++++
 4 files changed, 87 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfadf11b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 4af15d9..1bb5742 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -2519,6 +2519,10 @@ Release 2.8.0 - UNRELEASED
     HDFS-9571. Fix ASF Licence warnings in Jenkins reports (Brahma Reddy
     Battula via cnauroth)
 
+    HDFS-9393. After choosing favored nodes, choosing nodes for remaining
+    replicas should go through BlockPlacementPolicy
+    (J.Andreina via vinayakumarb)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfadf11b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 08e7851..14439e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -147,11 +147,18 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
             avoidStaleNodes, storageTypes);
 
       if (results.size() < numOfReplicas) {
-        // Not enough favored nodes, choose other nodes.
+        // Not enough favored nodes, choose other nodes, based on block
+        // placement policy (HDFS-9393).
         numOfReplicas -= results.size();
-        DatanodeStorageInfo[] remainingTargets =
-            chooseTarget(src, numOfReplicas, writer, results,
-                false, favoriteAndExcludedNodes, blocksize, storagePolicy);
+        for (DatanodeStorageInfo storage : results) {
+          // add localMachine and related nodes to favoriteAndExcludedNodes
+          addToExcludedNodes(storage.getDatanodeDescriptor(),
+              favoriteAndExcludedNodes);
+        }
+        DatanodeStorageInfo[] remainingTargets =
+            chooseTarget(src, numOfReplicas, writer,
+                new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
+                favoriteAndExcludedNodes, blocksize, storagePolicy);
         for (int i = 0; i < remainingTargets.length; i++) {
           results.add(remainingTargets[i]);
         }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfadf11b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index 3493c14..518a359 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -1464,4 +1464,44 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     chosenBlocks = underReplicatedBlocks.chooseUnderReplicatedBlocks(1);
     assertTheChosenBlocks(chosenBlocks, 1, 0, 0, 0, 0);
   }
+
+  /**
+   * In this testcase, 2 favored nodes are passed: dataNodes[0], dataNodes[1].
+   *
+   * Both favored nodes should be chosen as targets for placing replicas, and
+   * the remaining targets should then be chosen by the block placement policy,
+   * i.e. the third target on the local writer's rack, the fourth target on a
+   * remote rack and the fifth on the same rack as the second.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testChooseExcessReplicaApartFromFavoredNodes() throws Exception {
+    DatanodeStorageInfo[] targets;
+    List<DatanodeDescriptor> expectedTargets =
+        new ArrayList<DatanodeDescriptor>();
+    expectedTargets.add(dataNodes[0]);
+    expectedTargets.add(dataNodes[1]);
+    expectedTargets.add(dataNodes[2]);
+    expectedTargets.add(dataNodes[4]);
+    expectedTargets.add(dataNodes[5]);
+    List<DatanodeDescriptor> favouredNodes =
+        new ArrayList<DatanodeDescriptor>();
+    favouredNodes.add(dataNodes[0]);
+    favouredNodes.add(dataNodes[1]);
+    targets = chooseTarget(5, dataNodes[2], null, favouredNodes);
+    assertEquals(targets.length, 5);
+    for (int i = 0; i < targets.length; i++) {
+      assertTrue("Target should be a part of Expected Targets",
+          expectedTargets.contains(targets[i].getDatanodeDescriptor()));
+    }
+  }
+
+  private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
+      DatanodeDescriptor writer, Set<Node> excludedNodes,
+      List<DatanodeDescriptor> favoredNodes) {
+    return replicator.chooseTarget(filename, numOfReplicas, writer,
+        excludedNodes, BLOCK_SIZE, favoredNodes,
+        TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+  }
 }


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfadf11b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index b46983c..8ba9fb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -781,4 +781,36 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
     assertTrue("2nd Replica is incorrect",
         expectedTargets.contains(targets[1].getDatanodeDescriptor()));
   }
+
+  /**
+   * In this testcase, 3 favored nodes are passed: dataNodes[0], dataNodes[1]
+   * and dataNodes[2].
+   *
+   * Favored nodes on different node groups should be selected. The remaining
+   * replicas should go through the BlockPlacementPolicy.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testChooseRemainingReplicasApartFromFavoredNodes()
+      throws Exception {
+    DatanodeStorageInfo[] targets;
+    List<DatanodeDescriptor> expectedTargets =
+        new ArrayList<DatanodeDescriptor>();
+    expectedTargets.add(dataNodes[0]);
+    expectedTargets.add(dataNodes[2]);
+    expectedTargets.add(dataNodes[3]);
+    expectedTargets.add(dataNodes[6]);
+    expectedTargets.add(dataNodes[7]);
+    List<DatanodeDescriptor> favouredNodes =
+        new ArrayList<DatanodeDescriptor>();
+    favouredNodes.add(dataNodes[0]);
+    favouredNodes.add(dataNodes[1]);
+    favouredNodes.add(dataNodes[2]);
+    targets = chooseTarget(3, dataNodes[3], null, favouredNodes);
+    for (int i = 0; i < targets.length; i++) {
+      assertTrue("Target should be a part of Expected Targets",
+          expectedTargets.contains(targets[i].getDatanodeDescriptor()));
+    }
+  }
 }
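
----------------------------------------------------------------------
For context, a brief client-side sketch (not part of the commit above) of how
favored nodes reach the namenode in the first place, since that is the write
path whose remaining replicas now go through the block placement policy. It
assumes the DistributedFileSystem#create overload that accepts an
InetSocketAddress[] of favored datanodes; the class name, file path and
datanode addresses below are placeholders.

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsDataOutputStream;

public class FavoredNodesWriteExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Assumes fs.defaultFS points at an HDFS cluster.
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);

    // Placeholder datanode addresses; only these are "favored" by the client.
    InetSocketAddress[] favoredNodes = new InetSocketAddress[] {
        new InetSocketAddress("dn1.example.com", 50010),
        new InetSocketAddress("dn2.example.com", 50010)
    };

    // create() overload carrying favored nodes. With replication 3 and two
    // favored nodes, at least one replica must be placed elsewhere; with
    // HDFS-9393 that remaining target is chosen by the block placement
    // policy, with the already chosen favored nodes added to the excluded set.
    HdfsDataOutputStream out = dfs.create(new Path("/tmp/favored-file"),
        FsPermission.getFileDefault(), true /* overwrite */,
        conf.getInt("io.file.buffer.size", 4096), (short) 3 /* replication */,
        dfs.getDefaultBlockSize(), null /* progress */, favoredNodes);
    out.write("hello".getBytes("UTF-8"));
    out.close();
    dfs.close();
  }
}

In short: favored nodes are still tried first, and only the shortfall is handed
to the placement policy, so rack and node-group constraints apply to the
remaining replicas instead of being bypassed.
----------------------------------------------------------------------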
