Repository: hadoop Updated Branches: refs/heads/branch-2.6 020ef7f5b -> bddc6cd3f
HDFS-9365. Balancer does not work with the HDFS-6376 HA setup. (cherry picked from commit d3bdea7f7f94332ffe51fb65eec1f219fbf6657f) Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bddc6cd3 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bddc6cd3 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bddc6cd3 Branch: refs/heads/branch-2.6 Commit: bddc6cd3fb7f07f2845a8b2899eb30bd367ad9eb Parents: 020ef7f Author: Tsz-Wo Nicholas Sze <[email protected]> Authored: Tue May 24 12:49:48 2016 -0700 Committer: Sangjin Lee <[email protected]> Committed: Tue Sep 13 20:28:39 2016 -0700 ---------------------------------------------------------------------- hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt | 3 ++ .../java/org/apache/hadoop/hdfs/DFSUtil.java | 19 ++++++---- .../hadoop/hdfs/server/balancer/Balancer.java | 2 +- .../apache/hadoop/hdfs/server/mover/Mover.java | 2 +- .../org/apache/hadoop/hdfs/TestDFSUtil.java | 38 +++++++++++++------- .../hdfs/server/balancer/TestBalancer.java | 6 ++-- .../balancer/TestBalancerWithHANameNodes.java | 2 +- .../TestBalancerWithMultipleNameNodes.java | 2 +- .../balancer/TestBalancerWithNodeGroup.java | 4 +-- .../hadoop/hdfs/server/mover/TestMover.java | 12 +++---- .../hdfs/server/mover/TestStorageMover.java | 2 +- 11 files changed, 57 insertions(+), 35 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index bef4d6a..8795ad0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -42,6 +42,9 @@ Release 2.6.5 - UNRELEASED HDFS-10178. 
Permanent write failures can happen if pipeline recoveries occur for the first packet (kihwal) + HDFS-9365. Balancer does not work with the HDFS-6376 HA setup. (Tsz Wo + Nicholas Sze) + Release 2.6.4 - 2016-02-11 INCOMPATIBLE CHANGES http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java index 67880cd..73b2556 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java @@ -962,7 +962,14 @@ public class DFSUtil { "nnId=" + namenodeId + ";addr=" + addr + "]"; } } - + + /** @return Internal name services specified in the conf. */ + static Collection<String> getInternalNameServices(Configuration conf) { + final Collection<String> ids = conf.getTrimmedStringCollection( + DFSConfigKeys.DFS_INTERNAL_NAMESERVICES_KEY); + return !ids.isEmpty()? ids: getNameServiceIds(conf); + } + /** * Get a URI for each internal nameservice. 
If a nameservice is * HA-enabled, and the configured failover proxy provider supports logical @@ -975,8 +982,8 @@ public class DFSUtil { * @return a collection of all configured NN URIs, preferring service * addresses */ - public static Collection<URI> getNsServiceRpcUris(Configuration conf) { - return getNameServiceUris(conf, + public static Collection<URI> getInternalNsRpcUris(Configuration conf) { + return getNameServiceUris(conf, getInternalNameServices(conf), DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY); } @@ -993,8 +1000,8 @@ public class DFSUtil { * nameservices * @return a collection of all configured NN URIs */ - public static Collection<URI> getNameServiceUris(Configuration conf, - String... keys) { + static Collection<URI> getNameServiceUris(Configuration conf, + Collection<String> nameServices, String... keys) { Set<URI> ret = new HashSet<URI>(); // We're passed multiple possible configuration keys for any given NN or HA @@ -1004,7 +1011,7 @@ public class DFSUtil { // keep track of non-preferred keys here. 
Set<URI> nonPreferredUris = new HashSet<URI>(); - for (String nsId : getNameServiceIds(conf)) { + for (String nsId : nameServices) { URI nsUri; try { nsUri = new URI(HdfsConstants.HDFS_URI_SCHEME + "://" + nsId); http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java index eeac6ee..cc05368 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java @@ -670,7 +670,7 @@ public class Balancer { try { checkReplicationPolicyCompatibility(conf); - final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); return Balancer.run(namenodes, parse(args), conf); } catch (IOException e) { System.out.println(e + ". 
Exiting ..."); http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java index 59814af..95c8dd7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java @@ -577,7 +577,7 @@ public class Mover { } else if (line.hasOption("p")) { paths = line.getOptionValues("p"); } - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); if (paths == null || paths.length == 0) { for (URI namenode : namenodes) { map.put(namenode, null); http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java index 2a43db2..af23ddb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUtil.java @@ -79,6 +79,8 @@ import org.junit.Assume; import org.junit.Before; import org.junit.Test; +import com.google.common.collect.Sets; + public class TestDFSUtil { /** @@ -533,7 +535,7 @@ public class TestDFSUtil { ".ns2"; conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha." 
+ "ConfiguredFailoverProxyProvider"); - Collection<URI> uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY); + Collection<URI> uris = getInternalNameServiceUris(conf, DFS_NAMENODE_RPC_ADDRESS_KEY); assertEquals(2, uris.size()); assertTrue(uris.contains(new URI("hdfs://ns1"))); assertTrue(uris.contains(new URI("hdfs://ns2"))); @@ -616,7 +618,13 @@ public class TestDFSUtil { assertEquals("127.0.0.1:12345", DFSUtil.substituteForWildcardAddress("127.0.0.1:12345", "foo")); } - + + private static Collection<URI> getInternalNameServiceUris(Configuration conf, + String... keys) { + final Collection<String> ids = DFSUtil.getInternalNameServices(conf); + return DFSUtil.getNameServiceUris(conf, ids, keys); + } + @Test public void testGetNNUris() throws Exception { HdfsConfiguration conf = new HdfsConfiguration(); @@ -659,8 +667,7 @@ public class TestDFSUtil { ".ns1"; conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha." + "IPFailoverProxyProvider"); - Collection<URI> uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + Collection<URI> uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 4, uris.size()); assertTrue("Missing URI for name service ns1", @@ -680,8 +687,7 @@ public class TestDFSUtil { conf.set(proxyProviderKey, "org.apache.hadoop.hdfs.server.namenode.ha." 
+ "ConfiguredFailoverProxyProvider"); - uris = DFSUtil.getNameServiceUris(conf, - DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals("Incorrect number of URIs returned", 4, uris.size()); assertTrue("Missing URI for name service ns1", @@ -697,8 +703,7 @@ public class TestDFSUtil { conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "viewfs://vfs-name.example.com"); - uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, - DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals(3, uris.size()); assertTrue(uris.contains(new URI("hdfs://ns1"))); @@ -709,8 +714,7 @@ public class TestDFSUtil { // entries being returned. conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://ns1"); - uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, - DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals(3, uris.size()); assertTrue(uris.contains(new URI("hdfs://ns1"))); @@ -726,8 +730,7 @@ public class TestDFSUtil { conf.set(DFS_NAMENODE_RPC_ADDRESS_KEY, NN1_ADDR); conf.set(DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, NN1_SRVC_ADDR); - uris = DFSUtil.getNameServiceUris(conf, DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY, - DFS_NAMENODE_RPC_ADDRESS_KEY); + uris = DFSUtil.getInternalNsRpcUris(conf); assertEquals(1, uris.size()); assertTrue(uris.contains(new URI("hdfs://" + NN1_SRVC_ADDR))); @@ -742,7 +745,7 @@ public class TestDFSUtil { // it will automatically convert it to hostname HdfsConfiguration conf = new HdfsConfiguration(); conf.set(CommonConfigurationKeys.FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:8020"); - Collection<URI> uris = DFSUtil.getNameServiceUris(conf); + Collection<URI> uris = getInternalNameServiceUris(conf); assertEquals(1, uris.size()); for (URI uri : uris) { assertThat(uri.getHost(), not("127.0.0.1")); @@ -929,10 +932,19 @@ public class TestDFSUtil { 
conf.set(DFSUtil.addKeySuffixes(DFS_NAMENODE_RPC_ADDRESS_KEY, "nn2"), NN2_ADDRESS); + { + Collection<String> internal = DFSUtil.getInternalNameServices(conf); + assertEquals(Sets.newHashSet("nn1"), internal); + + Collection<String> all = DFSUtil.getNameServiceIds(conf); + assertEquals(Sets.newHashSet("nn1", "nn2"), all); + } + Map<String, Map<String, InetSocketAddress>> nnMap = DFSUtil .getNNServiceRpcAddressesForCluster(conf); assertEquals(1, nnMap.size()); assertTrue(nnMap.containsKey("nn1")); + conf.set(DFS_INTERNAL_NAMESERVICES_KEY, "nn3"); try { DFSUtil.getNNServiceRpcAddressesForCluster(conf); http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java index 16dbdfd..fc7c73d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java @@ -599,7 +599,7 @@ public class TestBalancer { waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); // start rebalancing - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); final int r = runBalancer(namenodes, p, conf); if (conf.getInt(DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_KEY, DFSConfigKeys.DFS_DATANODE_BALANCE_MAX_NUM_CONCURRENT_MOVES_DEFAULT) ==0) { @@ -795,7 +795,7 @@ public class TestBalancer { new String[]{RACK0}, null,new long[]{CAPACITY}); cluster.triggerHeartbeats(); - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes 
= DFSUtil.getInternalNsRpcUris(conf); Set<String> datanodes = new HashSet<String>(); datanodes.add(cluster.getDataNodes().get(0).getDatanodeId().getHostName()); Balancer.Parameters p = new Balancer.Parameters( @@ -1229,7 +1229,7 @@ public class TestBalancer { null, null, storageCapacities, null, false, false, false, null); cluster.triggerHeartbeats(); - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); // Run Balancer Balancer.Parameters p = new Balancer.Parameters( http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java index bd91366..6704982 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithHANameNodes.java @@ -94,7 +94,7 @@ public class TestBalancerWithHANameNodes { totalCapacity += newNodeCapacity; TestBalancer.waitForHeartBeat(totalUsedSpace, totalCapacity, client, cluster); - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); assertEquals(1, namenodes.size()); assertTrue(namenodes.contains(HATestUtil.getLogicalUri(cluster))); final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf); http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java 
---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java index 6ee6e54..6fd4d5a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithMultipleNameNodes.java @@ -158,7 +158,7 @@ public class TestBalancerWithMultipleNameNodes { LOG.info("BALANCER 1"); // start rebalancing - final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(s.conf); + final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(s.conf); final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, s.conf); Assert.assertEquals(ExitStatus.SUCCESS.getExitCode(), r); http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java index 7af3a0e..d6280a3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/balancer/TestBalancerWithNodeGroup.java @@ -174,7 +174,7 @@ public class TestBalancerWithNodeGroup { waitForHeartBeat(totalUsedSpace, totalCapacity); // start rebalancing - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + 
Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf); assertEquals(ExitStatus.SUCCESS.getExitCode(), r); @@ -188,7 +188,7 @@ public class TestBalancerWithNodeGroup { waitForHeartBeat(totalUsedSpace, totalCapacity); // start rebalancing - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); final int r = Balancer.run(namenodes, Balancer.Parameters.DEFAULT, conf); Assert.assertTrue(r == ExitStatus.SUCCESS.getExitCode() || (r == ExitStatus.NO_MOVE_PROGRESS.getExitCode())); http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java index 5866c7f..5ed4642 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestMover.java @@ -37,7 +37,7 @@ import org.junit.Test; public class TestMover { static Mover newMover(Configuration conf) throws IOException { - final Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + final Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(1, namenodes.size()); final List<NameNodeConnector> nncs = NameNodeConnector.newNameNodeConnectors( @@ -104,7 +104,7 @@ public class TestMover { } Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf); - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(1, namenodes.size()); 
Assert.assertEquals(1, movePaths.size()); URI nn = namenodes.iterator().next(); @@ -112,7 +112,7 @@ public class TestMover { Assert.assertNull(movePaths.get(nn)); movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar"); - namenodes = DFSUtil.getNsServiceRpcUris(conf); + namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(1, movePaths.size()); nn = namenodes.iterator().next(); Assert.assertTrue(movePaths.containsKey(nn)); @@ -133,7 +133,7 @@ public class TestMover { try { Map<URI, List<Path>> movePaths = Mover.Cli.getNameNodePathsToMove(conf, "-p", "/foo", "/bar"); - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(1, namenodes.size()); Assert.assertEquals(1, movePaths.size()); URI nn = namenodes.iterator().next(); @@ -154,7 +154,7 @@ public class TestMover { final Configuration conf = new HdfsConfiguration(); DFSTestUtil.setFederatedConfiguration(cluster, conf); try { - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(3, namenodes.size()); try { @@ -202,7 +202,7 @@ public class TestMover { final Configuration conf = new HdfsConfiguration(); DFSTestUtil.setFederatedHAConfiguration(cluster, conf); try { - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); Assert.assertEquals(3, namenodes.size()); Iterator<URI> iter = namenodes.iterator(); http://git-wip-us.apache.org/repos/asf/hadoop/blob/bddc6cd3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java index 0425dc4..5e7efbe 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/mover/TestStorageMover.java @@ -270,7 +270,7 @@ public class TestStorageMover { } private void runMover() throws Exception { - Collection<URI> namenodes = DFSUtil.getNsServiceRpcUris(conf); + Collection<URI> namenodes = DFSUtil.getInternalNsRpcUris(conf); Map<URI, List<Path>> nnMap = Maps.newHashMap(); for (URI nn : namenodes) { nnMap.put(nn, null); --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
