Author: eli
Date: Thu May 10 20:08:41 2012
New Revision: 1336867
URL: http://svn.apache.org/viewvc?rev=1336867&view=rev
Log:
HDFS-3230. svn merge -c 1336815 from trunk
Modified:
hadoop/common/branches/branch-2/hadoop-hdfs-project/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/ (props changed)
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project:r1336815
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1336815
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu May 10 20:08:41 2012
@@ -515,6 +515,8 @@ Release 2.0.0 - UNRELEASED
HDFS-3395. NN doesn't start with HA+security enabled and HTTP address set
to 0.0.0.0. (atm)
+ HDFS-3230. Cleanup DatanodeID creation in the tests. (eli)
+
BREAKDOWN OF HDFS-1623 SUBTASKS
HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1336815
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java Thu May 10 20:08:41 2012
@@ -40,7 +40,6 @@ import org.apache.hadoop.hdfs.protocol.L
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.io.MD5Hash;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1336815
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1336815
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1336815
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1336815
Propchange: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1336815
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java Thu May 10 20:08:41 2012
@@ -706,13 +706,23 @@ public class DFSTestUtil {
.join(nameservices));
}
+ public static DatanodeID getLocalDatanodeID() {
+ return new DatanodeID("127.0.0.1", "localhost",
DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT);
+ }
+
+ public static DatanodeID getLocalDatanodeID(int port) {
+ return new DatanodeID("127.0.0.1", "localhost", "", port, port, port);
+ }
+
public static DatanodeDescriptor getLocalDatanodeDescriptor() {
- return new DatanodeDescriptor(
- new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+ return new DatanodeDescriptor(getLocalDatanodeID());
}
public static DatanodeInfo getLocalDatanodeInfo() {
- return new DatanodeInfo(
- new DatanodeID("127.0.0.1", DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT));
+ return new DatanodeInfo(getLocalDatanodeID());
+ }
+
+ public static DatanodeInfo getLocalDatanodeInfo(int port) {
+ return new DatanodeInfo(getLocalDatanodeID(port));
}
}
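
For reference, a minimal usage sketch of the new helpers (hypothetical test
code, not part of this commit; it assumes the DatanodeID/DatanodeInfo imports
already present in these tests):

    // Loopback datanode identity on the default datanode port (50010).
    DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
    // Loopback DatanodeInfo bound to an explicit port, e.g. one a test
    // server was started on.
    DatanodeInfo dnInfo = DFSTestUtil.getLocalDatanodeInfo(50020);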
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java Thu May 10 20:08:41 2012
@@ -626,8 +626,7 @@ public class TestDFSClientRetries extend
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
- DatanodeID fakeDnId = new DatanodeID(
- "localhost", "localhost", "fake-storage", addr.getPort(), 0,
addr.getPort());
+ DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestReplaceDatanodeOnFailure.java Thu May 10 20:08:41 2012
@@ -62,7 +62,7 @@ public class TestReplaceDatanodeOnFailur
final DatanodeInfo[][] datanodes = new DatanodeInfo[infos.length + 1][];
datanodes[0] = new DatanodeInfo[0];
for(int i = 0; i < infos.length; ) {
- infos[i] = new DatanodeInfo(new DatanodeID("dn" + i, 100));
+ infos[i] = DFSTestUtil.getLocalDatanodeInfo(50020 + i);
i++;
datanodes[i] = new DatanodeInfo[i];
System.arraycopy(infos, 0, datanodes[i], 0, datanodes[i].length);
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocolPB/TestPBHelper.java Thu May 10 20:08:41 2012
@@ -131,7 +131,7 @@ public class TestPBHelper {
@Test
public void testConvertDatanodeID() {
- DatanodeID dn = new DatanodeID("node", "node", "sid", 1, 2, 3);
+ DatanodeID dn = DFSTestUtil.getLocalDatanodeID();
DatanodeIDProto dnProto = PBHelper.convert(dn);
DatanodeID dn2 = PBHelper.convert(dnProto);
compare(dn, dn2);
@@ -280,10 +280,6 @@ public class TestPBHelper {
return new ExtendedBlock("bpid", blkid, 100, 2);
}
- private DatanodeInfo getDNInfo() {
- return new DatanodeInfo(new DatanodeID("node", "node", "sid", 0, 1, 2));
- }
-
private void compare(DatanodeInfo dn1, DatanodeInfo dn2) {
assertEquals(dn1.getAdminState(), dn2.getAdminState());
assertEquals(dn1.getBlockPoolUsed(), dn2.getBlockPoolUsed());
@@ -316,7 +312,9 @@ public class TestPBHelper {
@Test
public void testConvertRecoveringBlock() {
- DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
+ DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
+ DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
+ DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
RecoveringBlock b = new RecoveringBlock(getExtendedBlock(), dnInfo, 3);
RecoveringBlockProto bProto = PBHelper.convert(b);
RecoveringBlock b1 = PBHelper.convert(bProto);
@@ -330,7 +328,9 @@ public class TestPBHelper {
@Test
public void testConvertBlockRecoveryCommand() {
- DatanodeInfo[] dnInfo = new DatanodeInfo[] { getDNInfo(), getDNInfo() };
+ DatanodeInfo di1 = DFSTestUtil.getLocalDatanodeInfo();
+ DatanodeInfo di2 = DFSTestUtil.getLocalDatanodeInfo();
+ DatanodeInfo[] dnInfo = new DatanodeInfo[] { di1, di2 };
List<RecoveringBlock> blks = ImmutableList.of(
new RecoveringBlock(getExtendedBlock(1), dnInfo, 3),
@@ -401,11 +401,14 @@ public class TestPBHelper {
@Test
public void testConvertLocatedBlock() {
DatanodeInfo [] dnInfos = new DatanodeInfo[3];
- dnInfos[0] = new DatanodeInfo("host0", "host0", "0", 5000, 5001, 5002,
20000, 10001, 9999,
+ dnInfos[0] = new DatanodeInfo("127.0.0.1", "host1", "0",
+ 5000, 5001, 5002, 20000, 10001, 9999,
59, 69, 32, "local", AdminStates.DECOMMISSION_INPROGRESS);
- dnInfos[1] = new DatanodeInfo("host1", "host1", "1", 5000, 5001, 5002, 20000, 10001, 9999,
+ dnInfos[1] = new DatanodeInfo("127.0.0.1", "host2", "1",
+ 5000, 5001, 5002, 20000, 10001, 9999,
59, 69, 32, "local", AdminStates.DECOMMISSIONED);
- dnInfos[2] = new DatanodeInfo("host2", "host2", "2", 5000, 5001, 5002, 20000, 10001, 9999,
+ dnInfos[2] = new DatanodeInfo("127.0.0.1", "host3", "2",
+ 5000, 5001, 5002, 20000, 10001, 9999,
59, 69, 32, "local", AdminStates.NORMAL);
LocatedBlock lb = new LocatedBlock(
new ExtendedBlock("bp12", 12345, 10, 53), dnInfos, 5, false);
@@ -424,7 +427,7 @@ public class TestPBHelper {
@Test
public void testConvertDatanodeRegistration() {
- DatanodeID dnId = new DatanodeID("host", "host", "xyz", 0, 1, 0);
+ DatanodeID dnId = DFSTestUtil.getLocalDatanodeID();
BlockKey[] keys = new BlockKey[] { getBlockKey(2), getBlockKey(3) };
ExportedBlockKeys expKeys = new ExportedBlockKeys(true, 9, 10,
getBlockKey(1), keys);
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java Thu May 10 20:08:41 2012
@@ -42,6 +42,7 @@ import org.apache.hadoop.fs.FSDataOutput
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -279,8 +280,7 @@ public class TestBlockToken {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
- DatanodeID fakeDnId = new DatanodeID("localhost",
- "localhost", "fake-storage", addr.getPort(), 0, addr.getPort());
+ DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
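
The TestDFSClientRetries and TestBlockToken changes above share one pattern:
start a test RPC server on an ephemeral port, then derive the fake datanode
identity from whatever port was actually bound. A condensed sketch
(hypothetical, using the same NetUtils and DFSTestUtil calls as this diff):

    server.start();
    // Ask the server which address it actually bound to.
    final InetSocketAddress addr = NetUtils.getConnectAddress(server);
    // Build a loopback DatanodeID on that port instead of hand-rolling one.
    DatanodeID fakeDnId = DFSTestUtil.getLocalDatanodeID(addr.getPort());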
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java Thu May 10 20:08:41 2012
@@ -47,17 +47,10 @@ import com.google.common.collect.LinkedL
import com.google.common.collect.Lists;
public class TestBlockManager {
- private final List<DatanodeDescriptor> nodes = ImmutableList.of(
- new DatanodeDescriptor(new DatanodeID("h1", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h2", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h3", 5020), "/rackA"),
- new DatanodeDescriptor(new DatanodeID("h4", 5020), "/rackB"),
- new DatanodeDescriptor(new DatanodeID("h5", 5020), "/rackB"),
- new DatanodeDescriptor(new DatanodeID("h6", 5020), "/rackB")
- );
- private final List<DatanodeDescriptor> rackA = nodes.subList(0, 3);
- private final List<DatanodeDescriptor> rackB = nodes.subList(3, 6);
-
+ private List<DatanodeDescriptor> nodes;
+ private List<DatanodeDescriptor> rackA;
+ private List<DatanodeDescriptor> rackB;
+
/**
* Some of these tests exercise code which has some randomness involved -
* ie even if there's a bug, they may pass because the random node selection
@@ -82,6 +75,16 @@ public class TestBlockManager {
fsn = Mockito.mock(FSNamesystem.class);
Mockito.doReturn(true).when(fsn).hasWriteLock();
bm = new BlockManager(fsn, fsn, conf);
+ nodes = ImmutableList.of(
+ new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/rackA"),
+ new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/rackB"),
+ new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/rackB"),
+ new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/rackB")
+ );
+ rackA = nodes.subList(0, 3);
+ rackB = nodes.subList(3, 6);
}
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
@@ -116,7 +119,7 @@ public class TestBlockManager {
}
private void doBasicTest(int testIndex) {
- List<DatanodeDescriptor> origNodes = nodes(0, 1);
+ List<DatanodeDescriptor> origNodes = getNodes(0, 1);
BlockInfo blockInfo = addBlockOnNodes((long)testIndex, origNodes);
DatanodeDescriptor[] pipeline = scheduleSingleReplication(blockInfo);
@@ -147,7 +150,7 @@ public class TestBlockManager {
private void doTestTwoOfThreeNodesDecommissioned(int testIndex) throws
Exception {
// Block originally on A1, A2, B1
- List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+ List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission two of the nodes (A1, A2)
@@ -157,7 +160,7 @@ public class TestBlockManager {
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origNodes.contains(pipeline[0]));
- assertEquals("Should have two targets", 3, pipeline.length);
+ assertEquals("Should have three targets", 3, pipeline.length);
boolean foundOneOnRackA = false;
for (int i = 1; i < pipeline.length; i++) {
@@ -190,7 +193,7 @@ public class TestBlockManager {
private void doTestAllNodesHoldingReplicasDecommissioned(int testIndex)
throws Exception {
// Block originally on A1, A2, B1
- List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+ List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission all of the nodes
@@ -242,7 +245,7 @@ public class TestBlockManager {
private void doTestOneOfTwoRacksDecommissioned(int testIndex) throws
Exception {
// Block originally on A1, A2, B1
- List<DatanodeDescriptor> origNodes = nodes(0, 1, 3);
+ List<DatanodeDescriptor> origNodes = getNodes(0, 1, 3);
BlockInfo blockInfo = addBlockOnNodes(testIndex, origNodes);
// Decommission all of the nodes in rack A
@@ -252,7 +255,7 @@ public class TestBlockManager {
assertTrue("Source of replication should be one of the nodes the block " +
"was on. Was: " + pipeline[0],
origNodes.contains(pipeline[0]));
- assertEquals("Should have 2 targets", 3, pipeline.length);
+ assertEquals("Should have three targets", 3, pipeline.length);
boolean foundOneOnRackB = false;
for (int i = 1; i < pipeline.length; i++) {
@@ -273,7 +276,8 @@ public class TestBlockManager {
// the block is still under-replicated. Add a new node. This should allow
// the third off-rack replica.
- DatanodeDescriptor rackCNode = new DatanodeDescriptor(new DatanodeID("h7",
100), "/rackC");
+ DatanodeDescriptor rackCNode =
+ new DatanodeDescriptor(new DatanodeID("7.7.7.7", 100), "/rackC");
addNodes(ImmutableList.of(rackCNode));
try {
DatanodeDescriptor[] pipeline2 = scheduleSingleReplication(blockInfo);
@@ -359,7 +363,7 @@ public class TestBlockManager {
return blockInfo;
}
- private List<DatanodeDescriptor> nodes(int ... indexes) {
+ private List<DatanodeDescriptor> getNodes(int ... indexes) {
List<DatanodeDescriptor> ret = Lists.newArrayList();
for (int idx : indexes) {
ret.add(nodes.get(idx));
@@ -368,7 +372,7 @@ public class TestBlockManager {
}
private List<DatanodeDescriptor> startDecommission(int ... indexes) {
- List<DatanodeDescriptor> nodes = nodes(indexes);
+ List<DatanodeDescriptor> nodes = getNodes(indexes);
for (DatanodeDescriptor node : nodes) {
node.startDecommission();
}
@@ -404,8 +408,9 @@ public class TestBlockManager {
LinkedListMultimap<DatanodeDescriptor, BlockTargetPair> repls =
getAllPendingReplications();
assertEquals(1, repls.size());
- Entry<DatanodeDescriptor, BlockTargetPair> repl = repls.entries()
- .iterator().next();
+ Entry<DatanodeDescriptor, BlockTargetPair> repl =
+ repls.entries().iterator().next();
+
DatanodeDescriptor[] targets = repl.getValue().targets;
DatanodeDescriptor[] pipeline = new DatanodeDescriptor[1 + targets.length];
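
The nodes(...) to getNodes(...) rename above is more than cosmetic: the
helper previously shared its name with the nodes field, and locals such as
the one in startDecommission also shadow that field, so the rename keeps
call sites unambiguous. A condensed sketch (hypothetical, mirroring the
diff):

    private List<DatanodeDescriptor> startDecommission(int... indexes) {
      // The local "nodes" shadows the field; the renamed helper avoids the
      // visual collision between nodes (field) and nodes(...) (method).
      List<DatanodeDescriptor> nodes = getNodes(indexes);
      for (DatanodeDescriptor node : nodes) {
        node.startDecommission();
      }
      return nodes;
    }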
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestHost2NodesMap.java Thu May 10 20:08:41 2012
@@ -18,73 +18,75 @@
package org.apache.hadoop.hdfs.server.blockmanagement;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
-
import org.apache.hadoop.hdfs.protocol.DatanodeID;
+
import org.junit.Before;
import org.junit.Test;
+import static org.junit.Assert.*;
+
public class TestHost2NodesMap {
private Host2NodesMap map = new Host2NodesMap();
- private final DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
- new DatanodeDescriptor(new DatanodeID("ip1", "h1", "", 5020, -1, -1), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("ip2", "h1", "", 5020, -1, -1), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5020, -1, -1), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("ip3", "h1", "", 5030, -1, -1), "/d1/r2"),
- };
- private final DatanodeDescriptor NULL_NODE = null;
- private final DatanodeDescriptor NODE = new DatanodeDescriptor(new DatanodeID("h3", 5040),
- "/d1/r4");
-
+ private DatanodeDescriptor dataNodes[];
+
@Before
public void setup() {
- for(DatanodeDescriptor node:dataNodes) {
+ dataNodes = new DatanodeDescriptor[] {
+ new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5030), "/d1/r2"),
+ };
+ for (DatanodeDescriptor node : dataNodes) {
map.add(node);
}
- map.add(NULL_NODE);
+ map.add(null);
}
@Test
public void testContains() throws Exception {
- for(int i=0; i<dataNodes.length; i++) {
+ DatanodeDescriptor nodeNotInMap =
+ new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5040), "/d1/r4");
+ for (int i = 0; i < dataNodes.length; i++) {
assertTrue(map.contains(dataNodes[i]));
}
- assertFalse(map.contains(NULL_NODE));
- assertFalse(map.contains(NODE));
+ assertFalse(map.contains(null));
+ assertFalse(map.contains(nodeNotInMap));
}
@Test
public void testGetDatanodeByHost() throws Exception {
- assertTrue(map.getDatanodeByHost("ip1")==dataNodes[0]);
- assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
- DatanodeDescriptor node = map.getDatanodeByHost("ip3");
- assertTrue(node==dataNodes[2] || node==dataNodes[3]);
- assertTrue(null==map.getDatanodeByHost("ip4"));
+ assertEquals(map.getDatanodeByHost("1.1.1.1"), dataNodes[0]);
+ assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
+ DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
+ assertTrue(node == dataNodes[2] || node == dataNodes[3]);
+ assertNull(map.getDatanodeByHost("4.4.4.4"));
}
@Test
public void testRemove() throws Exception {
- assertFalse(map.remove(NODE));
+ DatanodeDescriptor nodeNotInMap =
+ new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5040), "/d1/r4");
+ assertFalse(map.remove(nodeNotInMap));
assertTrue(map.remove(dataNodes[0]));
- assertTrue(map.getDatanodeByHost("ip1")==null);
- assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
- DatanodeDescriptor node = map.getDatanodeByHost("ip3");
+ assertTrue(map.getDatanodeByHost("1.1.1.1.")==null);
+ assertTrue(map.getDatanodeByHost("2.2.2.2")==dataNodes[1]);
+ DatanodeDescriptor node = map.getDatanodeByHost("3.3.3.3");
assertTrue(node==dataNodes[2] || node==dataNodes[3]);
- assertTrue(null==map.getDatanodeByHost("ip4"));
+ assertNull(map.getDatanodeByHost("4.4.4.4"));
assertTrue(map.remove(dataNodes[2]));
- assertTrue(map.getDatanodeByHost("ip1")==null);
- assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
- assertTrue(map.getDatanodeByHost("ip3")==dataNodes[3]);
+ assertNull(map.getDatanodeByHost("1.1.1.1"));
+ assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
+ assertEquals(map.getDatanodeByHost("3.3.3.3"), dataNodes[3]);
assertTrue(map.remove(dataNodes[3]));
- assertTrue(map.getDatanodeByHost("ip1")==null);
- assertTrue(map.getDatanodeByHost("ip2")==dataNodes[1]);
- assertTrue(map.getDatanodeByHost("ip3")==null);
+ assertNull(map.getDatanodeByHost("1.1.1.1"));
+ assertEquals(map.getDatanodeByHost("2.2.2.2"), dataNodes[1]);
+ assertNull(map.getDatanodeByHost("3.3.3.3"));
- assertFalse(map.remove(NULL_NODE));
+ assertFalse(map.remove(null));
assertTrue(map.remove(dataNodes[1]));
assertFalse(map.remove(dataNodes[1]));
}
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingDataNodeMessages.java Thu May 10 20:08:41 2012
@@ -21,6 +21,7 @@ import static org.junit.Assert.*;
import java.util.Queue;
+import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.PendingDataNodeMessages.ReportedBlockInfo;
@@ -38,12 +39,10 @@ public class TestPendingDataNodeMessages
private final Block block1Gs2DifferentInstance =
new Block(1, 0, 2);
private final Block block2Gs1 = new Block(2, 0, 1);
-
- private final DatanodeDescriptor fakeDN = new DatanodeDescriptor(
- new DatanodeID("fake", 100));
-
+
@Test
public void testQueues() {
+ DatanodeDescriptor fakeDN = DFSTestUtil.getLocalDatanodeDescriptor();
msgs.enqueueReportedBlock(fakeDN, block1Gs1, ReplicaState.FINALIZED);
msgs.enqueueReportedBlock(fakeDN, block1Gs2, ReplicaState.FINALIZED);
@@ -56,8 +55,8 @@ public class TestPendingDataNodeMessages
Queue<ReportedBlockInfo> q =
msgs.takeBlockQueue(block1Gs2DifferentInstance);
assertEquals(
- "ReportedBlockInfo [block=blk_1_1, dn=fake:100,
reportedState=FINALIZED]," +
- "ReportedBlockInfo [block=blk_1_2, dn=fake:100,
reportedState=FINALIZED]",
+ "ReportedBlockInfo [block=blk_1_1, dn=127.0.0.1:50010,
reportedState=FINALIZED]," +
+ "ReportedBlockInfo [block=blk_1_2, dn=127.0.0.1:50010,
reportedState=FINALIZED]",
Joiner.on(",").join(q));
assertEquals(0, msgs.count());
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java Thu May 10 20:08:41 2012
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.bl
import static org.junit.Assert.*;
-import java.io.IOException;
+import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
@@ -39,54 +39,55 @@ import org.apache.hadoop.hdfs.protocol.H
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
+import org.junit.BeforeClass;
import org.junit.Test;
public class TestReplicationPolicy {
- private Random random= DFSUtil.getRandom();
+ private Random random = DFSUtil.getRandom();
private static final int BLOCK_SIZE = 1024;
private static final int NUM_OF_DATANODES = 6;
- private static final Configuration CONF = new HdfsConfiguration();
- private static final NetworkTopology cluster;
- private static final NameNode namenode;
- private static final BlockPlacementPolicy replicator;
+ private static NetworkTopology cluster;
+ private static NameNode namenode;
+ private static BlockPlacementPolicy replicator;
private static final String filename = "/dummyfile.txt";
- private static final DatanodeDescriptor dataNodes[] =
- new DatanodeDescriptor[] {
- new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d2/r3"),
- new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3")
- };
-
- private final static DatanodeDescriptor NODE =
- new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r4");
-
- static {
- try {
- FileSystem.setDefaultUri(CONF, "hdfs://localhost:0");
- CONF.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
- DFSTestUtil.formatNameNode(CONF);
- namenode = new NameNode(CONF);
- } catch (IOException e) {
- e.printStackTrace();
- throw (RuntimeException)new RuntimeException().initCause(e);
- }
+ private static DatanodeDescriptor dataNodes[];
+
+ @BeforeClass
+ public static void setupCluster() throws Exception {
+ Configuration conf = new HdfsConfiguration();
+ dataNodes = new DatanodeDescriptor[] {
+ new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d2/r3"),
+ new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3")
+ };
+
+ FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");
+ File baseDir = new File(System.getProperty(
+ "test.build.data", "build/test/data"), "dfs/");
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ new File(baseDir, "name").getPath());
+
+ DFSTestUtil.formatNameNode(conf);
+ namenode = new NameNode(conf);
+
final BlockManager bm = namenode.getNamesystem().getBlockManager();
replicator = bm.getBlockPlacementPolicy();
cluster = bm.getDatanodeManager().getNetworkTopology();
// construct network topology
- for(int i=0; i<NUM_OF_DATANODES; i++) {
+ for (int i=0; i < NUM_OF_DATANODES; i++) {
cluster.add(dataNodes[i]);
}
- for(int i=0; i<NUM_OF_DATANODES; i++) {
+ for (int i=0; i < NUM_OF_DATANODES; i++) {
dataNodes[i].updateHeartbeat(
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L,
2*HdfsConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0L, 0, 0);
- }
+ }
}
-
+
/**
* In this testcase, client is dataNodes[0]. So the 1st replica should be
* placed on dataNodes[0], the 2nd replica should be placed on
@@ -337,22 +338,25 @@ public class TestReplicationPolicy {
*/
@Test
public void testChooseTarget5() throws Exception {
+ DatanodeDescriptor writerDesc =
+ new DatanodeDescriptor(new DatanodeID("7.7.7.7", 5020), "/d2/r4");
+
DatanodeDescriptor[] targets;
targets = replicator.chooseTarget(filename,
- 0, NODE, BLOCK_SIZE);
+ 0, writerDesc, BLOCK_SIZE);
assertEquals(targets.length, 0);
targets = replicator.chooseTarget(filename,
- 1, NODE, BLOCK_SIZE);
+ 1, writerDesc, BLOCK_SIZE);
assertEquals(targets.length, 1);
targets = replicator.chooseTarget(filename,
- 2, NODE, BLOCK_SIZE);
+ 2, writerDesc, BLOCK_SIZE);
assertEquals(targets.length, 2);
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
targets = replicator.chooseTarget(filename,
- 3, NODE, BLOCK_SIZE);
+ 3, writerDesc, BLOCK_SIZE);
assertEquals(targets.length, 3);
assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
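
Beyond swapping hostnames for IPs, the TestReplicationPolicy change above
also replaces a static initializer with @BeforeClass: a static initializer
cannot throw checked exceptions, which is why the old code rethrew
IOException as a RuntimeException via initCause, whereas a @BeforeClass
method may declare them and let JUnit report the real failure. A minimal
sketch of the pattern (hypothetical, mirroring the diff):

    @BeforeClass
    public static void setupCluster() throws Exception {
      Configuration conf = new HdfsConfiguration();
      // May throw IOException; JUnit surfaces it directly, no wrapping needed.
      DFSTestUtil.formatNameNode(conf);
      namenode = new NameNode(conf);
    }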
Modified: hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java?rev=1336867&r1=1336866&r2=1336867&view=diff
==============================================================================
--- hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java (original)
+++ hadoop/common/branches/branch-2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/net/TestNetworkTopology.java Thu May 10 20:08:41 2012
@@ -18,52 +18,59 @@
package org.apache.hadoop.net;
-
import java.util.HashMap;
import java.util.Map;
-import junit.framework.TestCase;
-
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-public class TestNetworkTopology extends TestCase {
+import org.junit.Test;
+import org.junit.Before;
+
+import static org.junit.Assert.*;
+
+public class TestNetworkTopology {
private final static NetworkTopology cluster = new NetworkTopology();
- private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
- new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h4", 5020), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h5", 5020), "/d1/r2"),
- new DatanodeDescriptor(new DatanodeID("h6", 5020), "/d2/r3"),
- new DatanodeDescriptor(new DatanodeID("h7", 5020), "/d2/r3")
- };
- private final static DatanodeDescriptor NODE =
- new DatanodeDescriptor(new DatanodeID("h8", 5020), "/d2/r4");
+ private DatanodeDescriptor dataNodes[];
- static {
- for(int i=0; i<dataNodes.length; i++) {
+ @Before
+ public void setupDatanodes() {
+ dataNodes = new DatanodeDescriptor[] {
+ new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("4.4.4.4", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("5.5.5.5", 5020), "/d1/r2"),
+ new DatanodeDescriptor(new DatanodeID("6.6.6.6", 5020), "/d2/r3"),
+ new DatanodeDescriptor(new DatanodeID("7.7.7.7", 5020), "/d2/r3")
+ };
+ for (int i = 0; i < dataNodes.length; i++) {
cluster.add(dataNodes[i]);
}
}
+ @Test
public void testContains() throws Exception {
- for(int i=0; i<dataNodes.length; i++) {
+ DatanodeDescriptor nodeNotInMap =
+ new DatanodeDescriptor(new DatanodeID("8.8.8.8", 5020), "/d2/r4");
+ for (int i=0; i < dataNodes.length; i++) {
assertTrue(cluster.contains(dataNodes[i]));
}
- assertFalse(cluster.contains(NODE));
+ assertFalse(cluster.contains(nodeNotInMap));
}
+ @Test
public void testNumOfChildren() throws Exception {
assertEquals(cluster.getNumOfLeaves(), dataNodes.length);
}
+ @Test
public void testCreateInvalidTopology() throws Exception {
NetworkTopology invalCluster = new NetworkTopology();
DatanodeDescriptor invalDataNodes[] = new DatanodeDescriptor[] {
- new DatanodeDescriptor(new DatanodeID("h1", 5020), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h2", 5020), "/d1/r1"),
- new DatanodeDescriptor(new DatanodeID("h3", 5020), "/d1")
+ new DatanodeDescriptor(new DatanodeID("1.1.1.1", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("2.2.2.2", 5020), "/d1/r1"),
+ new DatanodeDescriptor(new DatanodeID("3.3.3.3", 5020), "/d1")
};
invalCluster.add(invalDataNodes[0]);
invalCluster.add(invalDataNodes[1]);
@@ -77,6 +84,7 @@ public class TestNetworkTopology extends
}
}
+ @Test
public void testRacks() throws Exception {
assertEquals(cluster.getNumOfRacks(), 3);
assertTrue(cluster.isOnSameRack(dataNodes[0], dataNodes[1]));
@@ -87,6 +95,7 @@ public class TestNetworkTopology extends
assertTrue(cluster.isOnSameRack(dataNodes[5], dataNodes[6]));
}
+ @Test
public void testGetDistance() throws Exception {
assertEquals(cluster.getDistance(dataNodes[0], dataNodes[0]), 0);
assertEquals(cluster.getDistance(dataNodes[0], dataNodes[1]), 2);
@@ -94,6 +103,7 @@ public class TestNetworkTopology extends
assertEquals(cluster.getDistance(dataNodes[0], dataNodes[6]), 6);
}
+ @Test
public void testPseudoSortByDistance() throws Exception {
DatanodeDescriptor[] testNodes = new DatanodeDescriptor[3];
@@ -136,6 +146,7 @@ public class TestNetworkTopology extends
assertTrue(testNodes[2] == dataNodes[3]);
}
+ @Test
public void testRemove() throws Exception {
for(int i=0; i<dataNodes.length; i++) {
cluster.remove(dataNodes[i]);
@@ -173,6 +184,7 @@ public class TestNetworkTopology extends
/**
* This test checks that chooseRandom works for an excluded node.
*/
+ @Test
public void testChooseRandomExcludedNode() {
String scope = "~" + NodeBase.getPath(dataNodes[0]);
Map<Node, Integer> frequency = pickNodesAtRandom(100, scope);
@@ -186,6 +198,7 @@ public class TestNetworkTopology extends
/**
* This test checks that chooseRandom works for an excluded rack.
*/
+ @Test
public void testChooseRandomExcludedRack() {
Map<Node, Integer> frequency = pickNodesAtRandom(100, "~" + "/d2");
// all the nodes on the second rack should be zero