Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java Wed Oct 30 22:21:59 2013
@@ -39,6 +39,7 @@ import org.apache.hadoop.conf.Configurat
 import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HAUtil;
@@ -47,19 +48,22 @@ import org.apache.hadoop.hdfs.MiniDFSNNT
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.RetriableException;
+import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.SecurityUtilTestHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.AfterClass;
+import org.junit.After;
 import org.junit.Before;
-import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 import com.google.common.base.Joiner;
 
@@ -78,8 +82,12 @@ public class TestDelegationTokensWithHA
   private static DelegationTokenSecretManager dtSecretManager;
   private static DistributedFileSystem dfs;
 
-  @BeforeClass
-  public static void setupCluster() throws Exception {
+  private volatile boolean catchup = false;
+
+  @Before
+  public void setupCluster() throws Exception {
+    SecurityUtilTestHelper.setTokenServiceUseIp(true);
+
     conf.setBoolean(
         DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_ALWAYS_USE_KEY, true);
     conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL,
@@ -101,18 +109,12 @@ public class TestDelegationTokensWithHA
         nn0.getNamesystem());
   }
 
-  @AfterClass
-  public static void shutdownCluster() throws IOException {
+  @After
+  public void shutdownCluster() throws IOException {
     if (cluster != null) {
      cluster.shutdown();
     }
   }
-
-
-  @Before
-  public void prepTest() {
-    SecurityUtilTestHelper.setTokenServiceUseIp(true);
-  }
 
   @Test
   public void testDelegationTokenDFSApi() throws Exception {
@@ -155,6 +157,96 @@
     doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
   }
 
+  private class EditLogTailerForTest extends EditLogTailer {
+    public EditLogTailerForTest(FSNamesystem namesystem, Configuration conf) {
+      super(namesystem, conf);
+    }
+
+    public void catchupDuringFailover() throws IOException {
+      synchronized (TestDelegationTokensWithHA.this) {
+        while (!catchup) {
+          try {
+            LOG.info("The editlog tailer is waiting to catchup...");
+            TestDelegationTokensWithHA.this.wait();
+          } catch (InterruptedException e) {}
+        }
+      }
+      super.catchupDuringFailover();
+    }
+  }
+
+  /**
+   * Test if correct exception (StandbyException or RetriableException) can be
+   * thrown during the NN failover.
+   */
+  @Test
+  public void testDelegationTokenDuringNNFailover() throws Exception {
+    EditLogTailer editLogTailer = nn1.getNamesystem().getEditLogTailer();
+    // stop the editLogTailer of nn1
+    editLogTailer.stop();
+    Configuration conf = (Configuration) Whitebox.getInternalState(
+        editLogTailer, "conf");
+    nn1.getNamesystem().setEditLogTailerForTests(
+        new EditLogTailerForTest(nn1.getNamesystem(), conf));
+
+    // create token
+    final Token<DelegationTokenIdentifier> token =
+        getDelegationToken(fs, "JobTracker");
+    DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
+    byte[] tokenId = token.getIdentifier();
+    identifier.readFields(new DataInputStream(
+        new ByteArrayInputStream(tokenId)));
+
+    // Ensure that it's present in the nn0 secret manager and can
+    // be renewed directly from there.
+    LOG.info("A valid token should have non-null password, " +
+        "and should be renewed successfully");
+    assertTrue(null != dtSecretManager.retrievePassword(identifier));
+    dtSecretManager.renewToken(token, "JobTracker");
+
+    // transition nn0 to standby
+    cluster.transitionToStandby(0);
+
+    try {
+      cluster.getNameNodeRpc(0).renewDelegationToken(token);
+      fail("StandbyException is expected since nn0 is in standby state");
+    } catch (StandbyException e) {
+      GenericTestUtils.assertExceptionContains(
+          HAServiceState.STANDBY.toString(), e);
+    }
+
+    new Thread() {
+      @Override
+      public void run() {
+        try {
+          cluster.transitionToActive(1);
+        } catch (Exception e) {
+          LOG.error("Transition nn1 to active failed", e);
+        }
+      }
+    }.start();
+
+    Thread.sleep(1000);
+    try {
+      nn1.getNamesystem().verifyToken(token.decodeIdentifier(),
+          token.getPassword());
+      fail("RetriableException/StandbyException is expected since nn1 is in transition");
+    } catch (IOException e) {
+      assertTrue(e instanceof StandbyException
+          || e instanceof RetriableException);
+      LOG.info("Got expected exception", e);
+    }
+
+    catchup = true;
+    synchronized (this) {
+      this.notifyAll();
+    }
+
+    Configuration clientConf = dfs.getConf();
+    doRenewOrCancel(token, clientConf, TokenTestAction.RENEW);
+    doRenewOrCancel(token, clientConf, TokenTestAction.CANCEL);
+  }
+
   @SuppressWarnings("deprecation")
   @Test
   public void testDelegationTokenWithDoAs() throws Exception {
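The new test above freezes the standby's catch-up step with a volatile flag plus wait/notify on the test instance, so that nn1 stays in its STANDBY-to-ACTIVE transition long enough for verifyToken to be probed. The same one-shot gate can be expressed with a java.util.concurrent.CountDownLatch, which avoids the explicit synchronized/wait loop; a minimal sketch of that pattern (the class name is illustrative, not part of this change):

    import java.util.concurrent.CountDownLatch;

    /** One-shot gate: a worker thread blocks in a hook until the test releases it. */
    class FailoverGate {
      private final CountDownLatch released = new CountDownLatch(1);

      /** Called from the overridden catch-up hook; blocks until release(). */
      void awaitRelease() throws InterruptedException {
        released.await();
      }

      /** Called by the test once its in-transition assertions are done. */
      void release() {
        released.countDown();
      }
    }

With such a gate, EditLogTailerForTest.catchupDuringFailover() would call awaitRelease(), and the test would call release() in place of the catchup flag and notifyAll().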
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestEditLogTailer.java Wed Oct 30 22:21:59 2013
@@ -124,8 +124,8 @@ public class TestEditLogTailer {
     // Have to specify IPC ports so the NNs can talk to each other.
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setIpcPort(10031))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setIpcPort(10032)));
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestFailureToReadEdits.java Wed Oct 30 22:21:59 2013
@@ -76,8 +76,8 @@ public class TestFailureToReadEdits {
 
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10041))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10042)));
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)
       .numDataNodes(0)
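The port renumbering above (and in the files that follow) exists because several of these suites previously hard-coded the same 10001/10002 pair, so tests running concurrently on one build host could collide at bind time. Spreading the suites across distinct ranges is the simple fix taken here; a stricter alternative, sketched below under the assumption that the topology could accept an arbitrary free port, is to let the kernel pick one:

    import java.io.IOException;
    import java.net.ServerSocket;

    /** Returns a port that is free at the moment of the call. Still racy
     *  between close() and the later bind, but far less collision-prone
     *  than a constant shared across test suites. */
    static int getFreePort() throws IOException {
      ServerSocket s = new ServerSocket(0);
      try {
        return s.getLocalPort();
      } finally {
        s.close();
      }
    }

(try-with-resources is deliberately avoided to match the Java 6 level of this branch.)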
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHAFsck.java Wed Oct 30 22:21:59 2013
@@ -52,8 +52,8 @@ public class TestHAFsck {
     // need some HTTP ports
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ha-nn-uri-0")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10051))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10052)));
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java Wed Oct 30 22:21:59 2013
@@ -17,12 +17,18 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.URI;
+import java.util.Collections;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -101,6 +107,50 @@ public class TestHASafeMode {
     }
   }
 
+  /**
+   * Make sure the client retries when the active NN is in safemode
+   */
+  @Test (timeout=300000)
+  public void testClientRetrySafeMode() throws Exception {
+    final Map<Path, Boolean> results = Collections
+        .synchronizedMap(new HashMap<Path, Boolean>());
+    final Path test = new Path("/test");
+    // let nn0 enter safemode
+    NameNodeAdapter.enterSafeMode(nn0, false);
+    LOG.info("enter safemode");
+    new Thread() {
+      @Override
+      public void run() {
+        try {
+          boolean mkdir = fs.mkdirs(test);
+          LOG.info("mkdir finished, result is " + mkdir);
+          synchronized (TestHASafeMode.this) {
+            results.put(test, mkdir);
+            TestHASafeMode.this.notifyAll();
+          }
+        } catch (Exception e) {
+          LOG.info("Got Exception while calling mkdir", e);
+        }
+      }
+    }.start();
+
+    // make sure the client's call has actually been handled by the active NN
+    assertFalse("The directory should not be created while NN in safemode",
+        fs.exists(test));
+
+    Thread.sleep(1000);
+    // let nn0 leave safemode
+    NameNodeAdapter.leaveSafeMode(nn0);
+    LOG.info("leave safemode");
+
+    synchronized (this) {
+      while (!results.containsKey(test)) {
+        this.wait();
+      }
+      assertTrue(results.get(test));
+    }
+  }
+
   private void restartStandby() throws IOException {
     cluster.shutdownNameNode(1);
     // Set the safemode extension to be lengthy, so that the tests
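testClientRetrySafeMode above hands the mkdirs result back through a synchronized map plus wait/notify. A Future makes the same hand-off more direct; a sketch under the assumption that it reuses the test's fs, test, and nn0 fields and the JUnit asserts already in scope:

    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;
    import java.util.concurrent.TimeUnit;

    ExecutorService exec = Executors.newSingleThreadExecutor();
    // Blocks inside the client's retry loop while nn0 is in safemode.
    Future<Boolean> mkdirResult = exec.submit(new Callable<Boolean>() {
      @Override
      public Boolean call() throws Exception {
        return fs.mkdirs(test);
      }
    });
    assertFalse("The directory should not be created while NN in safemode",
        fs.exists(test));
    NameNodeAdapter.leaveSafeMode(nn0);
    // get() returns once the retried mkdirs finally succeeds.
    assertTrue(mkdirResult.get(60, TimeUnit.SECONDS));
    exec.shutdown();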
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestRetryCacheWithHA.java Wed Oct 30 22:21:59 2013
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.na
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Method;
@@ -646,10 +647,14 @@ public class TestRetryCacheWithHA {
     @Override
     boolean checkNamenodeBeforeReturn() throws Exception {
       Path linkPath = new Path(link);
-      FileStatus linkStatus = dfs.getFileLinkStatus(linkPath);
+      FileStatus linkStatus = null;
       for (int i = 0; i < CHECKTIMES && linkStatus == null; i++) {
-        Thread.sleep(1000);
-        linkStatus = dfs.getFileLinkStatus(linkPath);
+        try {
+          linkStatus = dfs.getFileLinkStatus(linkPath);
+        } catch (FileNotFoundException fnf) {
+          // Ignoring, this can be legitimate.
+          Thread.sleep(1000);
+        }
       }
       return linkStatus != null;
     }
@@ -857,4 +862,4 @@ public class TestRetryCacheWithHA {
           + results.get(op.name));
     }
   }
-}
\ No newline at end of file
+}

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java Wed Oct 30 22:21:59 2013
@@ -89,8 +89,8 @@ public class TestStandbyCheckpoints {
     MiniDFSNNTopology topology = new MiniDFSNNTopology()
       .addNameservice(new MiniDFSNNTopology.NSConf("ns1")
-        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10001))
-        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10002)));
+        .addNN(new MiniDFSNNTopology.NNConf("nn1").setHttpPort(10061))
+        .addNN(new MiniDFSNNTopology.NNConf("nn2").setHttpPort(10062)));
 
     cluster = new MiniDFSCluster.Builder(conf)
       .nnTopology(topology)
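The checkNamenodeBeforeReturn fix above addresses a real bug: the first getFileLinkStatus call sat outside the try block, so a FileNotFoundException thrown before the retried operation landed failed the test instead of being polled through. The corrected shape - poll, swallow the expected not-found error, sleep, retry - can be factored into a helper; a sketch only, with illustrative names:

    import java.io.FileNotFoundException;

    interface Check<T> {
      T run() throws Exception;
    }

    /** Polls check up to attempts times, treating FileNotFoundException
     *  as "not there yet" rather than as a failure. */
    static <T> T pollUntilPresent(Check<T> check, int attempts, long sleepMs)
        throws Exception {
      for (int i = 0; i < attempts; i++) {
        try {
          T result = check.run();
          if (result != null) {
            return result;
          }
        } catch (FileNotFoundException fnf) {
          // Expected while the retried operation has not yet been re-applied.
        }
        Thread.sleep(sleepMs);
      }
      return null;
    }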
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/metrics/TestNameNodeMetrics.java Wed Oct 30 22:21:59 2013
@@ -199,18 +199,11 @@ public class TestNameNodeMetrics {
     assertCounter("CreateFileOps", 1L, rb);
     assertCounter("FilesCreated", (long)file.depth(), rb);
 
-    // Blocks are stored in a hashmap. Compute its capacity, which
-    // doubles every time the number of entries reach the threshold.
-    int threshold = (int)(blockCapacity * BlockManager.DEFAULT_MAP_LOAD_FACTOR);
-    while (threshold < blockCount) {
-      blockCapacity <<= 1;
-    }
     updateMetrics();
     long filesTotal = file.depth() + 1; // Add 1 for root
     rb = getMetrics(NS_METRICS);
     assertGauge("FilesTotal", filesTotal, rb);
     assertGauge("BlocksTotal", blockCount, rb);
-    assertGauge("BlockCapacity", blockCapacity, rb);
     fs.delete(file, true);
     filesTotal--; // reduce the filecount for deleted file

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Wed Oct 30 22:21:59 2013
@@ -23,12 +23,17 @@ import static org.junit.Assert.assertNul
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.ByteArrayOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.PrintStream;
+import java.security.PrivilegedAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -45,7 +50,9 @@ import org.apache.hadoop.hdfs.server.nam
 import org.apache.hadoop.hdfs.server.namenode.Quota;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot.DirectoryDiffList;
 import org.apache.hadoop.hdfs.util.ReadOnlyList;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.After;
 import org.junit.Before;
@@ -777,7 +784,40 @@ public class TestSnapshotDeletion {
     assertEquals("user1", statusOfS1.getOwner());
     assertEquals("group1", statusOfS1.getGroup());
   }
-  
+
+  @Test
+  public void testDeleteSnapshotWithPermissionsDisabled() throws Exception {
+    cluster.shutdown();
+    Configuration newConf = new Configuration(conf);
+    newConf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false);
+    cluster = new MiniDFSCluster.Builder(newConf).numDataNodes(0).build();
+    cluster.waitActive();
+    hdfs = cluster.getFileSystem();
+
+    final Path path = new Path("/dir");
+    hdfs.mkdirs(path);
+    hdfs.allowSnapshot(path);
+    hdfs.mkdirs(new Path(path, "/test"));
+    hdfs.createSnapshot(path, "s1");
+    UserGroupInformation anotherUser = UserGroupInformation
+        .createRemoteUser("anotheruser");
+    anotherUser.doAs(new PrivilegedAction<Object>() {
+      @Override
+      public Object run() {
+        DistributedFileSystem anotherUserFS = null;
+        try {
+          anotherUserFS = cluster.getFileSystem();
+          anotherUserFS.deleteSnapshot(path, "s1");
+        } catch (IOException e) {
+          fail("Failed to delete snapshot : " + e.getLocalizedMessage());
+        } finally {
+          IOUtils.closeStream(anotherUserFS);
+        }
+        return null;
+      }
+    });
+  }
+
   /**
    * A test covering the case where the snapshot diff to be deleted is renamed
    * to its previous snapshot.
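testDeleteSnapshotWithPermissionsDisabled converts any IOException inside the PrivilegedAction into fail(), which loses the original stack trace from the failure report. PrivilegedExceptionAction lets the exception propagate instead; a sketch of the same body reworked that way, reusing the test's cluster and path fields (java.security.PrivilegedExceptionAction would replace the PrivilegedAction import):

    anotherUser.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        DistributedFileSystem anotherUserFS = null;
        try {
          // With dfs.permissions disabled this must succeed even though
          // "anotheruser" does not own /dir.
          anotherUserFS = cluster.getFileSystem();
          anotherUserFS.deleteSnapshot(path, "s1");
        } finally {
          IOUtils.closeStream(anotherUserFS);
        }
        return null;
      }
    });

doAs then rethrows the wrapped exception to the test method, so JUnit reports the real cause directly.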
@@ -884,4 +924,29 @@
     subFile1Status = hdfs.getFileStatus(subFile1SCopy);
     assertEquals(REPLICATION_1, subFile1Status.getReplication());
   }
+
+  @Test
+  public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    PrintStream psOut = new PrintStream(out);
+    System.setOut(psOut);
+    System.setErr(psOut);
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+
+    String[] argv1 = {"-deleteSnapshot", "/tmp"};
+    int val = shell.run(argv1);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv1[0] + ": Incorrect number of arguments."));
+    out.reset();
+
+    String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
+    val = shell.run(argv2);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv2[0] + ": Incorrect number of arguments."));
+    psOut.close();
+    out.close();
+  }
 }

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java Wed Oct 30 22:21:59 2013
@@ -22,10 +22,13 @@ import static org.junit.Assert.assertFal
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -226,4 +229,29 @@ public class TestSnapshotRename {
       }
     }
   }
+
+  @Test
+  public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    PrintStream psOut = new PrintStream(out);
+    System.setOut(psOut);
+    System.setErr(psOut);
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+
+    String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
+    int val = shell.run(argv1);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv1[0] + ": Incorrect number of arguments."));
+    out.reset();
+
+    String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
+    val = shell.run(argv2);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv2[0] + ": Incorrect number of arguments."));
+    psOut.close();
+    out.close();
+  }
 }
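Both new illegal-argument tests above replace System.out and System.err for the whole JVM and never restore them, which can swallow output from later tests in the same VM. A variant that restores the streams, sketched under the assumption that conf and the JUnit asserts are in scope as in the tests above:

    PrintStream oldOut = System.out;
    PrintStream oldErr = System.err;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    PrintStream psOut = new PrintStream(out);
    try {
      System.setOut(psOut);
      System.setErr(psOut);
      FsShell shell = new FsShell();
      shell.setConf(conf);
      assertEquals(-1, shell.run(new String[] {"-deleteSnapshot", "/tmp"}));
      assertTrue(out.toString().contains(
          "-deleteSnapshot: Incorrect number of arguments."));
    } finally {
      psOut.close();
      System.setOut(oldOut);
      System.setErr(oldErr);
    }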
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/TestOfflineEditsViewer.java Wed Oct 30 22:21:59 2013
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.DFSTestUti
 import org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes;
 import org.apache.hadoop.hdfs.server.namenode.OfflineEditsViewerHelper;
 import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer.Flags;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -53,7 +54,7 @@ public class TestOfflineEditsViewer {
   }
 
   private static String buildDir =
-    System.getProperty("test.build.data", "build/test/data");
+    PathUtils.getTestDirName(TestOfflineEditsViewer.class);
 
   private static String cacheDir = System.getProperty("test.cache.data",
       "build/test/cache");

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestDelimitedImageVisitor.java Wed Oct 30 22:21:59 2013
@@ -27,6 +27,7 @@ import java.io.FileReader;
 import java.io.IOException;
 
 import org.apache.hadoop.hdfs.tools.offlineImageViewer.ImageVisitor.ImageElement;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Test;
 
 /**
@@ -34,7 +35,7 @@ import org.junit.Test;
  * on predetermined inputs
 */
 public class TestDelimitedImageVisitor {
-  private static String ROOT = System.getProperty("test.build.data","/tmp");
+  private static String ROOT = PathUtils.getTestDirName(TestDelimitedImageVisitor.class);
   private static final String delim = "--";
 
   // Record an element in the visitor and build the expected line in the output
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewer.java Wed Oct 30 22:21:59 2013
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.MiniDFSClu
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
 import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
@@ -88,8 +89,7 @@ public class TestOfflineImageViewer {
   final static HashMap<String, FileStatus> writtenFiles =
       new HashMap<String, FileStatus>();
 
-  private static String ROOT = System.getProperty("test.build.data",
-      "build/test/data");
+  private static String ROOT = PathUtils.getTestDirName(TestOfflineImageViewer.class);
 
   // Create a populated namespace for later testing.  Save its contents to a
   // data structure and store its fsimage location.

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestAtomicFileOutputStream.java Wed Oct 30 22:21:59 2013
@@ -30,6 +30,7 @@ import java.io.OutputStream;
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
 
@@ -40,10 +41,7 @@ public class TestAtomicFileOutputStream
   private static final String TEST_STRING = "hello world";
   private static final String TEST_STRING_2 = "goodbye world";
 
-  private static File BASE_DIR = new File(
-      System.getProperty("test.build.data", "build/test/data"));
-  private static File TEST_DIR = new File(BASE_DIR,
-      TestAtomicFileOutputStream.class.getName());
+  private static File TEST_DIR = PathUtils.getTestDir(TestAtomicFileOutputStream.class);
 
   private static File DST_FILE = new File(TEST_DIR, "test.txt");
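TestOfflineEditsViewer, TestDelimitedImageVisitor, TestOfflineImageViewer, and TestAtomicFileOutputStream above, plus TestMD5FileUtils below, all make the same substitution: ad-hoc System.getProperty("test.build.data", ...) lookups become PathUtils.getTestDir/getTestDirName calls keyed off the test class, so each suite writes under its own directory instead of sharing one. Roughly, such a helper boils down to the following (an illustrative sketch only; the real org.apache.hadoop.test.PathUtils may differ in detail):

    import java.io.File;

    /** Per-test-class scratch directory rooted at test.build.data. */
    static File getTestDir(Class<?> caller) {
      File base = new File(
          System.getProperty("test.build.data", "build/test/data"));
      return new File(base, caller.getSimpleName());
    }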
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestMD5FileUtils.java Wed Oct 30 22:21:59 2013
@@ -29,14 +29,12 @@ import java.io.IOException;
 
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.test.PathUtils;
 import org.junit.Before;
 import org.junit.Test;
 
 public class TestMD5FileUtils {
-  private static final File TEST_DIR_ROOT = new File(
-      System.getProperty("test.build.data","build/test/data"));
-  private static final File TEST_DIR = new File(TEST_DIR_ROOT,
-      "TestMD5FileUtils");
+  private static final File TEST_DIR = PathUtils.getTestDir(TestMD5FileUtils.class);
   private static final File TEST_FILE = new File(TEST_DIR,
       "testMd5File.dat");

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java Wed Oct 30 22:21:59 2013
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.web;
 
+import java.util.HashMap;
 import java.util.Map;
 
 import org.apache.hadoop.fs.FileStatus;
@@ -58,4 +59,27 @@ public class TestJsonUtil {
     System.out.println("fs2 = " + fs2);
     Assert.assertEquals(fstatus, fs2);
   }
+
+  @Test
+  public void testToDatanodeInfoWithoutSecurePort() {
+    Map<String, Object> response = new HashMap<String, Object>();
+
+    response.put("ipAddr", "127.0.0.1");
+    response.put("hostName", "localhost");
+    response.put("storageID", "fake-id");
+    response.put("xferPort", 1337l);
+    response.put("infoPort", 1338l);
+    // deliberately don't include an entry for "infoSecurePort"
+    response.put("ipcPort", 1339l);
+    response.put("capacity", 1024l);
+    response.put("dfsUsed", 512l);
+    response.put("remaining", 512l);
+    response.put("blockPoolUsed", 512l);
+    response.put("lastUpdate", 0l);
+    response.put("xceiverCount", 4096l);
+    response.put("networkLocation", "foo.bar.baz");
+    response.put("adminState", "NORMAL");
+
+    JsonUtil.toDatanodeInfo(response);
+  }
 }
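testToDatanodeInfoWithoutSecurePort builds a response map that deliberately omits "infoSecurePort" and checks that JsonUtil.toDatanodeInfo still parses it, i.e. that the client tolerates replies from older servers that predate the field. The tolerant-read pattern such a parser needs looks roughly like this (an illustrative helper, not JsonUtil's actual code):

    import java.util.Map;

    /** Reads a numeric field that may be absent in responses from older
     *  servers, falling back to a default instead of throwing NPE. */
    static long getLong(Map<String, Object> m, String key, long defaultValue) {
      Object value = m.get(key);
      return value == null ? defaultValue : ((Number) value).longValue();
    }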
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsTimeouts.java Wed Oct 30 22:21:59 2013
@@ -25,9 +25,11 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.InputStreamReader;
 import java.io.OutputStream;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;
 import java.net.Socket;
+import java.net.SocketAddress;
 import java.net.SocketTimeoutException;
 import java.nio.channels.SocketChannel;
 import java.util.ArrayList;
@@ -41,6 +43,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.NetUtils;
@@ -58,7 +61,6 @@ public class TestWebHdfsTimeouts {
 
   private static final int CLIENTS_TO_CONSUME_BACKLOG = 100;
   private static final int CONNECTION_BACKLOG = 1;
-  private static final int INITIAL_SOCKET_TIMEOUT = URLUtils.SOCKET_TIMEOUT;
   private static final int SHORT_SOCKET_TIMEOUT = 5;
   private static final int TEST_TIMEOUT = 10000;
 
@@ -67,20 +69,23 @@ public class TestWebHdfsTimeouts {
   private InetSocketAddress nnHttpAddress;
   private ServerSocket serverSocket;
   private Thread serverThread;
+  private URLConnectionFactory connectionFactory = new URLConnectionFactory(SHORT_SOCKET_TIMEOUT);
 
   @Before
   public void setUp() throws Exception {
-    URLUtils.SOCKET_TIMEOUT = SHORT_SOCKET_TIMEOUT;
     Configuration conf = WebHdfsTestUtil.createConf();
-    nnHttpAddress = NameNode.getHttpAddress(conf);
-    serverSocket = new ServerSocket(nnHttpAddress.getPort(), CONNECTION_BACKLOG);
+    serverSocket = new ServerSocket(0, CONNECTION_BACKLOG);
+    nnHttpAddress = new InetSocketAddress("localhost", serverSocket.getLocalPort());
+    conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "localhost:" + serverSocket.getLocalPort());
     fs = WebHdfsTestUtil.getWebHdfsFileSystem(conf);
+    fs.connectionFactory = connectionFactory;
     clients = new ArrayList<SocketChannel>();
     serverThread = null;
   }
 
   @After
   public void tearDown() throws Exception {
+    fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
     IOUtils.cleanup(LOG, clients.toArray(new SocketChannel[clients.size()]));
     IOUtils.cleanup(LOG, fs);
     if (serverSocket != null) {
@@ -240,7 +245,7 @@ public class TestWebHdfsTimeouts {
   */
  private void startSingleTemporaryRedirectResponseThread(
      final boolean consumeConnectionBacklog) {
-    URLUtils.SOCKET_TIMEOUT = INITIAL_SOCKET_TIMEOUT;
+    fs.connectionFactory = URLConnectionFactory.DEFAULT_CONNECTION_FACTORY;
     serverThread = new Thread() {
      @Override
      public void run() {
@@ -254,7 +259,7 @@ public class TestWebHdfsTimeouts {
           clientSocket = serverSocket.accept();
 
           // Immediately setup conditions for subsequent connections.
-          URLUtils.SOCKET_TIMEOUT = SHORT_SOCKET_TIMEOUT;
+          fs.connectionFactory = connectionFactory;
           if (consumeConnectionBacklog) {
             consumeConnectionBacklog();
           }

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/WebHdfsTestUtil.java Wed Oct 30 22:21:59 2013
@@ -81,7 +81,7 @@ public class WebHdfsTestUtil {
 
   public static HttpURLConnection twoStepWrite(final WebHdfsFileSystem webhdfs,
       final HttpOpParam.Op op, HttpURLConnection conn) throws IOException {
-    return webhdfs.new Runner(op, conn).twoStepWrite();
+    return webhdfs.new ConnRunner(op, conn).twoStepWrite();
   }
 
   public static FSDataOutputStream write(final WebHdfsFileSystem webhdfs,

Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestJMXGet.java Wed Oct 30 22:21:59 2013
@@ -21,9 +21,13 @@ package org.apache.hadoop.tools;
 import static org.apache.hadoop.test.MetricsAsserts.assertGauge;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
 import java.io.IOException;
+import java.io.PipedInputStream;
+import java.io.PipedOutputStream;
+import java.io.PrintStream;
 import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
@@ -103,6 +107,7 @@ public class TestJMXGet {
     //jmx.init();
     //jmx = new JMXGet();
     jmx.init(); // default lists namenode mbeans only
+    assertTrue("error printAllValues", checkPrintAllValues(jmx));
 
     //get some data from different source
     assertEquals(numDatanodes, Integer.parseInt(
@@ -114,7 +119,24 @@ public class TestJMXGet {
 
     cluster.shutdown();
   }
-  
+
+  private static boolean checkPrintAllValues(JMXGet jmx) throws Exception {
+    int size = 0;
+    byte[] bytes = null;
+    String pattern = "List of all the available keys:";
+    PipedOutputStream pipeOut = new PipedOutputStream();
+    PipedInputStream pipeIn = new PipedInputStream(pipeOut);
+    System.setErr(new PrintStream(pipeOut));
+    jmx.printAllValues();
+    if ((size = pipeIn.available()) != 0) {
+      bytes = new byte[size];
+      pipeIn.read(bytes, 0, bytes.length);
+    }
+    pipeOut.close();
+    pipeIn.close();
+    return bytes != null ? new String(bytes).contains(pattern) : false;
+  }
+
   /**
   * test JMX connection to DataNode..
  * @throws Exception
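checkPrintAllValues pipes System.err through a PipedOutputStream and reads whatever available() reports after printAllValues returns. That works because the call is synchronous, but a pipe has a fixed internal buffer and the writer can block if output exceeds it; capturing into memory sidesteps both issues and restores the original stream. A sketch, assuming as above that jmx.printAllValues() writes synchronously to System.err (java.io.ByteArrayOutputStream would be added to the imports):

    ByteArrayOutputStream captured = new ByteArrayOutputStream();
    PrintStream oldErr = System.err;
    System.setErr(new PrintStream(captured, true));
    try {
      jmx.printAllValues();
    } finally {
      System.setErr(oldErr);
    }
    boolean sawKeys = captured.toString()
        .contains("List of all the available keys:");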
Modified: hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml?rev=1537330&r1=1537329&r2=1537330&view=diff
==============================================================================
--- hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml (original)
+++ hadoop/common/branches/YARN-321/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/testHDFSConf.xml Wed Oct 30 22:21:59 2013
@@ -1043,6 +1043,7 @@
     <test> <!-- TESTED -->
       <description>ls: Negative test for quoted /*/* globbing </description>
+      <windows>false</windows>
       <test-commands>
         <command>-fs NAMENODE -mkdir /dir0</command>
         <command>-fs NAMENODE -mkdir /dir0/dir1</command>
@@ -1062,6 +1063,7 @@
     <test> <!-- TESTED -->
       <description>ls: Test for quoted globbing </description>
+      <windows>false</windows>
       <test-commands>
         <command>-fs NAMENODE -mkdir /dir0</command>
         <command>-fs NAMENODE -mkdir /dir0/\*</command>
@@ -1082,6 +1084,7 @@
     <test> <!-- TESTED -->
       <description>rm: Test for quoted globbing </description>
+      <windows>false</windows>
       <test-commands>
         <command>-fs NAMENODE -mkdir /dir0</command>
         <command>-fs NAMENODE -mkdir /dir0/\*</command>
@@ -6049,7 +6052,7 @@
         <command>-fs NAMENODE -mkdir /dir0</command>
         <command>-fs NAMENODE -touchz /dir0/file0</command>
         <command>-fs NAMENODE -touchz /dir0/file1</command>
-        <command>-fs NAMENODE -setrep -R 2 /dir0</command>
+        <command>-fs NAMENODE -setrep 2 /dir0</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r /user</command>
@@ -6072,7 +6075,7 @@
         <command>-fs NAMENODE -mkdir -p dir0</command>
         <command>-fs NAMENODE -touchz dir0/file0</command>
         <command>-fs NAMENODE -touchz dir0/file1</command>
-        <command>-fs NAMENODE -setrep -R 2 dir0</command>
+        <command>-fs NAMENODE -setrep 2 dir0</command>
       </test-commands>
       <cleanup-commands>
         <command>-fs NAMENODE -rm -r /user</command>
@@ -6090,6 +6093,24 @@
     </test>
 
     <test> <!-- TESTED -->
+      <description>setrep: -R ignored for existing file</description>
+      <test-commands>
+        <command>-fs NAMENODE -mkdir -p dir0</command>
+        <command>-fs NAMENODE -touchz dir0/file0</command>
+        <command>-fs NAMENODE -setrep -R 2 dir0/file0</command>
+      </test-commands>
+      <cleanup-commands>
+        <command>-fs NAMENODE -rm -r /user</command>
+      </cleanup-commands>
+      <comparators>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^Replication 2 set: dir0/file0</expected-output>
+        </comparator>
+      </comparators>
+    </test>
+
+    <test> <!-- TESTED -->
       <description>setrep: non existent file (absolute path)</description>
       <test-commands>
         <command>-fs NAMENODE -setrep 2 /dir0/file</command>
@@ -6145,7 +6166,7 @@
         <command>-fs NAMENODE -mkdir hdfs:///dir0/</command>
         <command>-fs NAMENODE -touchz hdfs:///dir0/file0</command>
         <command>-fs NAMENODE -touchz hdfs:///dir0/file1</command>
-        <command>-fs NAMENODE -setrep -R 2 hdfs:///dir0</command>
+        <command>-fs NAMENODE -setrep 2 hdfs:///dir0</command>
      </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm -r hdfs:///*</command>
@@ -6203,7 +6224,7 @@
         <command>-fs NAMENODE -mkdir -p NAMENODE/dir0</command>
         <command>-fs NAMENODE -touchz NAMENODE/dir0/file0</command>
         <command>-fs NAMENODE -touchz NAMENODE/dir0/file1</command>
-        <command>-fs NAMENODE -setrep -R 2 NAMENODE/dir0</command>
+        <command>-fs NAMENODE -setrep 2 NAMENODE/dir0</command>
       </test-commands>
      <cleanup-commands>
        <command>-fs NAMENODE -rm -r NAMENODE/*</command>