Author: arp
Date: Thu Oct 17 18:54:53 2013
New Revision: 1533208

URL: http://svn.apache.org/r1533208
Log:
Merging r1532911 through r1533207 from trunk to branch HDFS-2832
Added:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js
      - copied unchanged from r1533207, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfs-dust.js
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
      - copied unchanged from r1533207, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
      - copied unchanged from r1533207, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.html
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
      - copied unchanged from r1533207, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-full-2.0.0.min.js
      - copied unchanged from r1533207, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-full-2.0.0.min.js
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-helpers-1.1.1.min.js
      - copied unchanged from r1533207, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/static/dust-helpers-1.1.1.min.js

Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1532911-1533207

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1533208&r1=1533207&r2=1533208&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Oct 17 18:54:53 2013
@@ -255,7 +255,12 @@ Release 2.3.0 - UNRELEASED
 
     HDFS-5342. Provide more information in the FSNamesystem JMX interfaces.
     (Haohui Mai via jing9)
-
+
+    HDFS-5334. Implement dfshealth.jsp in HTML pages. (Haohui Mai via jing9)
+
+    HDFS-5379. Update links to datanode information in dfshealth.html. (Haohui
+    Mai via jing9)
+
   IMPROVEMENTS
 
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
@@ -352,6 +357,13 @@ Release 2.3.0 - UNRELEASED
     HDFS-5283. Under construction blocks only inside snapshots should not be
     counted in safemode threshhold. (Vinay via szetszwo)
 
+    HDFS-4376. Fix race conditions in Balancer. (Junping Du via szetszwo)
+
+    HDFS-5375. hdfs.cmd does not expose several snapshot commands. (cnauroth)
+
+    HDFS-5336. DataNode should not output 'StartupProgress' metrics.
+    (Akira Ajisaka via cnauroth)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -360,6 +372,9 @@ Release 2.2.1 - UNRELEASED
 
   IMPROVEMENTS
 
+    HDFS-5360. Improvement of usage message of renameSnapshot and
+    deleteSnapshot. (Shinichi Yamashita via wang)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -3674,6 +3689,9 @@ Release 0.23.10 - UNRELEASED
     HDFS-5010. Reduce the frequency of getCurrentUser() calls from namenode
     (kihwal)
 
+    HDFS-5346. Avoid unnecessary call to getNumLiveDataNodes() for each block
+    during IBR processing (Ravi Prakash via kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1533208&r1=1533207&r2=1533208&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml Thu Oct 17 18:54:53 2013
@@ -542,6 +542,9 @@ http://maven.apache.org/xsd/maven-4.0.0.
             <exclude>src/main/docs/releasenotes.html</exclude>
             <exclude>src/contrib/**</exclude>
             <exclude>src/site/resources/images/*</exclude>
+            <exclude>src/main/webapps/static/dust-full-2.0.0.min.js</exclude>
+            <exclude>src/main/webapps/static/dust-helpers-1.1.1.min.js</exclude>
+            <exclude>src/main/webapps/hdfs/dfshealth.dust.html</exclude>
           </excludes>
         </configuration>
       </plugin>

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd?rev=1533208&r1=1533207&r2=1533208&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs.cmd Thu Oct 17 18:54:53 2013
@@ -123,6 +123,14 @@ goto :eof
   set CLASS=org.apache.hadoop.hdfs.tools.GetGroups
   goto :eof
 
+:snapshotDiff
+  set CLASS=org.apache.hadoop.hdfs.tools.snapshot.SnapshotDiff
+  goto :eof
+
+:lsSnapshottableDir
+  set CLASS=org.apache.hadoop.hdfs.tools.snapshot.LsSnapshottableDir
+  goto :eof
+
 @rem This changes %1, %2 etc. Hence those cannot be used after calling this.
 :make_command_arguments
   if "%1" == "--config" (
@@ -164,7 +172,10 @@ goto :eof
   @echo   fetchdt              fetch a delegation token from the NameNode
   @echo   getconf              get config values from configuration
   @echo   groups               get the groups which users belong to
-  @echo   Use -help to see options
+  @echo   snapshotDiff         diff two snapshots of a directory or diff the
+  @echo                        current directory contents with a snapshot
+  @echo   lsSnapshottableDir   list all snapshottable dirs owned by the current user
+  @echo   Use -help to see options
   @echo.
   @echo Most commands print help when invoked w/o parameters.

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1532911-1533207

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java?rev=1533208&r1=1533207&r2=1533208&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java Thu Oct 17 18:54:53 2013
@@ -506,7 +506,7 @@ public class Balancer {
     final DatanodeInfo datanode;
     final double utilization;
     final long maxSize2Move;
-    protected long scheduledSize = 0L;
+    private long scheduledSize = 0L;
     //  blocks being moved but not confirmed yet
     private List<PendingBlockMove> pendingBlocks =
         new ArrayList<PendingBlockMove>(MAX_NUM_CONCURRENT_MOVES);
@@ -555,20 +555,35 @@ public class Balancer {
     }
 
     /** Decide if still need to move more bytes */
-    protected boolean hasSpaceForScheduling() {
+    protected synchronized boolean hasSpaceForScheduling() {
       return scheduledSize<maxSize2Move;
     }
 
     /** Return the total number of bytes that need to be moved */
-    protected long availableSizeToMove() {
+    protected synchronized long availableSizeToMove() {
       return maxSize2Move-scheduledSize;
     }
 
-    /* increment scheduled size */
-    protected void incScheduledSize(long size) {
+    /** increment scheduled size */
+    protected synchronized void incScheduledSize(long size) {
       scheduledSize += size;
     }
 
+    /** decrement scheduled size */
+    protected synchronized void decScheduledSize(long size) {
+      scheduledSize -= size;
+    }
+
+    /** get scheduled size */
+    protected synchronized long getScheduledSize(){
+      return scheduledSize;
+    }
+
+    /** get scheduled size */
+    protected synchronized void setScheduledSize(long size){
+      scheduledSize = size;
+    }
+
     /* Check if the node can schedule more blocks to move */
     synchronized private boolean isPendingQNotFull() {
       if ( pendingBlocks.size() < MAX_NUM_CONCURRENT_MOVES ) {
@@ -702,8 +717,8 @@ public class Balancer {
         pendingBlock.source = this;
         pendingBlock.target = target;
         if ( pendingBlock.chooseBlockAndProxy() ) {
-          long blockSize = pendingBlock.block.getNumBytes();
-          scheduledSize -= blockSize;
+          long blockSize = pendingBlock.block.getNumBytes();
+          decScheduledSize(blockSize);
           task.size -= blockSize;
           if (task.size == 0) {
             tasks.remove();
@@ -747,10 +762,11 @@ public class Balancer {
     private static final long MAX_ITERATION_TIME = 20*60*1000L; //20 mins
     private void dispatchBlocks() {
       long startTime = Time.now();
+      long scheduledSize = getScheduledSize();
       this.blocksToReceive = 2*scheduledSize;
       boolean isTimeUp = false;
       int noPendingBlockIteration = 0;
-      while(!isTimeUp && scheduledSize>0 &&
+      while(!isTimeUp && getScheduledSize()>0 &&
           (!srcBlockList.isEmpty() || blocksToReceive>0)) {
         PendingBlockMove pendingBlock = chooseNextBlockToMove();
         if (pendingBlock != null) {
@@ -779,7 +795,7 @@ public class Balancer {
           // in case no blocks can be moved for source node's task,
          // jump out of while-loop after 5 iterations.
           if (noPendingBlockIteration >= MAX_NO_PENDING_BLOCK_ITERATIONS) {
-            scheduledSize = 0;
+            setScheduledSize(0);
           }
         }
@@ -981,7 +997,7 @@ public class Balancer {
     long bytesToMove = 0L;
     for (Source src : sources) {
-      bytesToMove += src.scheduledSize;
+      bytesToMove += src.getScheduledSize();
     }
     return bytesToMove;
   }
@@ -1082,7 +1098,7 @@ public class Balancer {
       bytesMoved += bytes;
     }
 
-    private long get() {
+    private synchronized long get() {
       return bytesMoved;
     }
   };

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1533208&r1=1533207&r2=1533208&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Thu Oct 17 18:54:53 2013
@@ -4613,7 +4613,7 @@ public class FSNamesystem implements Nam
      */
     private boolean needEnter() {
       return (threshold != 0 && blockSafe < blockThreshold) ||
-        (getNumLiveDataNodes() < datanodeThreshold) ||
+        (datanodeThreshold != 0 && getNumLiveDataNodes() < datanodeThreshold) ||
         (!nameNodeHasResourcesAvailable());
     }

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=1533208&r1=1533207&r2=1533208&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java Thu Oct 17 18:54:53 2013
@@ -270,10 +270,6 @@ public class NameNode implements NameNod
   static NameNodeMetrics metrics;
   private static final StartupProgress startupProgress = new StartupProgress();
-  static {
-    StartupProgressMetrics.register(startupProgress);
-  }
-
   /** Return the {@link FSNamesystem} object.
    * @return {@link FSNamesystem} object.
    */
@@ -485,6 +481,7 @@ public class NameNode implements NameNod
     loginAsNameNodeUser(conf);
 
     NameNode.initMetrics(conf, this.getRole());
+    StartupProgressMetrics.register(startupProgress);
 
     if (NamenodeRole.NAMENODE == role) {
       startHttpServer(conf);

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1532877-1533207

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java?rev=1533208&r1=1533207&r2=1533208&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java Thu Oct 17 18:54:53 2013
@@ -19,8 +19,10 @@ package org.apache.hadoop.fs;
 
 import static org.apache.hadoop.fs.FileContextTestHelper.exists;
+import static org.junit.Assert.fail;
 
 import java.io.IOException;
+import java.net.URI;
 import java.net.URISyntaxException;
 
 import javax.security.auth.login.LoginException;
@@ -55,7 +57,8 @@ public class TestHDFSFileContextMainOper
       LoginException, URISyntaxException {
     cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     cluster.waitClusterUp();
-    fc = FileContext.getFileContext(cluster.getURI(0), CONF);
+    URI uri0 = cluster.getURI(0);
+    fc = FileContext.getFileContext(uri0, CONF);
     defaultWorkingDirectory = fc.makeQualified(new Path("/user/" +
         UserGroupInformation.getCurrentUser().getShortUserName()));
     fc.mkdir(defaultWorkingDirectory, FileContext.DEFAULT_PERM, true);
@@ -77,7 +80,10 @@ public class TestHDFSFileContextMainOper
 
   @AfterClass
   public static void ClusterShutdownAtEnd() throws Exception {
-    cluster.shutdown();
+    if (cluster != null) {
+      cluster.shutdown();
+      cluster = null;
+    }
   }
 
   @Override
@@ -111,7 +117,7 @@ public class TestHDFSFileContextMainOper
 
   @Test
   public void testOldRenameWithQuota() throws Exception {
-    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
+    DistributedFileSystem fs = cluster.getFileSystem();
     Path src1 = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src1");
     Path src2 = getTestRootPath(fc, "test/testOldRenameWithQuota/srcdir/src2");
     Path dst1 = getTestRootPath(fc, "test/testOldRenameWithQuota/dstdir/dst1");
@@ -146,7 +152,7 @@ public class TestHDFSFileContextMainOper
 
   @Test
   public void testRenameWithQuota() throws Exception {
-    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
+    DistributedFileSystem fs = cluster.getFileSystem();
     Path src1 = getTestRootPath(fc, "test/testRenameWithQuota/srcdir/src1");
     Path src2 = getTestRootPath(fc, "test/testRenameWithQuota/srcdir/src2");
     Path dst1 = getTestRootPath(fc, "test/testRenameWithQuota/dstdir/dst1");
@@ -210,7 +216,7 @@ public class TestHDFSFileContextMainOper
    */
   @Test
   public void testEditsLogOldRename() throws Exception {
-    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
+    DistributedFileSystem fs = cluster.getFileSystem();
     Path src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
     Path dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
     createFile(src1);
@@ -226,7 +232,7 @@ public class TestHDFSFileContextMainOper
     // Restart the cluster and ensure the above operations can be
     // loaded from the edits log
     restartCluster();
-    fs = (DistributedFileSystem)cluster.getFileSystem();
+    fs = cluster.getFileSystem();
     src1 = getTestRootPath(fc, "testEditsLogOldRename/srcdir/src1");
     dst1 = getTestRootPath(fc, "testEditsLogOldRename/dstdir/dst1");
     Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
@@ -239,7 +245,7 @@ public class TestHDFSFileContextMainOper
    */
   @Test
   public void testEditsLogRename() throws Exception {
-    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
+    DistributedFileSystem fs = cluster.getFileSystem();
     Path src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
     Path dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
     createFile(src1);
@@ -255,7 +261,7 @@ public class TestHDFSFileContextMainOper
     // Restart the cluster and ensure the above operations can be
     // loaded from the edits log
     restartCluster();
-    fs = (DistributedFileSystem)cluster.getFileSystem();
+    fs = cluster.getFileSystem();
     src1 = getTestRootPath(fc, "testEditsLogRename/srcdir/src1");
     dst1 = getTestRootPath(fc, "testEditsLogRename/dstdir/dst1");
     Assert.assertFalse(fs.exists(src1));   // ensure src1 is already renamed
@@ -279,7 +285,7 @@ public class TestHDFSFileContextMainOper
 
   private void oldRename(Path src, Path dst, boolean renameSucceeds,
       boolean exception) throws Exception {
-    DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
+    DistributedFileSystem fs = cluster.getFileSystem();
     try {
       Assert.assertEquals(renameSucceeds, fs.rename(src, dst));
     } catch (Exception ex) {
@@ -301,4 +307,23 @@ public class TestHDFSFileContextMainOper
     Assert.assertEquals(renameSucceeds, !exists(fc, src));
     Assert.assertEquals((dstExists||renameSucceeds), exists(fc, dst));
   }
+
+  @Override
+  protected boolean listCorruptedBlocksSupported() {
+    return true;
+  }
+
+  @Test
+  public void testCrossFileSystemRename() throws IOException {
+    try {
+      fc.rename(
+        new Path("hdfs://127.0.0.1/aaa/bbb/Foo"),
+        new Path("file://aaa/bbb/Moo"),
+        Options.Rename.OVERWRITE);
+      fail("IOexception expected.");
+    } catch (IOException ioe) {
+      // okay
+    }
+  }
+}

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java?rev=1533208&r1=1533207&r2=1533208&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java Thu Oct 17 18:54:53 2013
@@ -23,12 +23,15 @@ import static org.junit.Assert.assertNul
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.ByteArrayOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.io.PrintStream;
 import java.security.PrivilegedAction;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
@@ -921,4 +924,29 @@ public class TestSnapshotDeletion {
     subFile1Status = hdfs.getFileStatus(subFile1SCopy);
     assertEquals(REPLICATION_1, subFile1Status.getReplication());
   }
+
+  @Test
+  public void testDeleteSnapshotCommandWithIllegalArguments() throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    PrintStream psOut = new PrintStream(out);
+    System.setOut(psOut);
+    System.setErr(psOut);
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+
+    String[] argv1 = {"-deleteSnapshot", "/tmp"};
+    int val = shell.run(argv1);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv1[0] + ": Incorrect number of arguments."));
+    out.reset();
+
+    String[] argv2 = {"-deleteSnapshot", "/tmp", "s1", "s2"};
+    val = shell.run(argv2);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv2[0] + ": Incorrect number of arguments."));
+    psOut.close();
+    out.close();
+  }
+}

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java?rev=1533208&r1=1533207&r2=1533208&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotRename.java Thu Oct 17 18:54:53 2013
@@ -22,10 +22,13 @@ import static org.junit.Assert.assertFal
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
 import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -226,4 +229,29 @@ public class TestSnapshotRename {
       }
     }
   }
+
+  @Test
+  public void testRenameSnapshotCommandWithIllegalArguments() throws Exception {
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    PrintStream psOut = new PrintStream(out);
+    System.setOut(psOut);
+    System.setErr(psOut);
+    FsShell shell = new FsShell();
+    shell.setConf(conf);
+
+    String[] argv1 = {"-renameSnapshot", "/tmp", "s1"};
+    int val = shell.run(argv1);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv1[0] + ": Incorrect number of arguments."));
+    out.reset();
+
+    String[] argv2 = {"-renameSnapshot", "/tmp", "s1", "s2", "s3"};
+    val = shell.run(argv2);
+    assertTrue(val == -1);
+    assertTrue(out.toString().contains(
+        argv2[0] + ": Incorrect number of arguments."));
+    psOut.close();
+    out.close();
+  }
+}
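
Note on the HDFS-4376 hunks above: the fix makes Source.scheduledSize private and
funnels every read and write through synchronized accessors (incScheduledSize,
decScheduledSize, getScheduledSize, setScheduledSize), so the dispatcher threads
no longer race on the bare field. A minimal standalone sketch of the same
pattern follows; the method names mirror the diff, but the enclosing class
ScheduledWork is a hypothetical illustration, not the real Hadoop class.

// Hypothetical distillation of the Balancer.Source locking pattern from
// the HDFS-4376 hunks above; not the actual Hadoop class.
public class ScheduledWork {
  private final long maxSize2Move;
  // Guarded by "this"; callers never touch the field directly.
  private long scheduledSize = 0L;

  public ScheduledWork(long maxSize2Move) {
    this.maxSize2Move = maxSize2Move;
  }

  /** Decide if more bytes may still be scheduled for moving. */
  public synchronized boolean hasSpaceForScheduling() {
    return scheduledSize < maxSize2Move;
  }

  /** Return the number of bytes that may still be scheduled. */
  public synchronized long availableSizeToMove() {
    return maxSize2Move - scheduledSize;
  }

  public synchronized void incScheduledSize(long size) {
    scheduledSize += size;
  }

  public synchronized void decScheduledSize(long size) {
    scheduledSize -= size;
  }

  public synchronized long getScheduledSize() {
    return scheduledSize;
  }

  public synchronized void setScheduledSize(long size) {
    scheduledSize = size;
  }
}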
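
Likewise, the one-line FSNamesystem change above adds a datanodeThreshold != 0
guard so that, like the existing block check, the live-datanode check is skipped
entirely when its threshold is configured to 0. A sketch of the resulting
predicate: the field and helper names are copied from the hunk, but this
standalone class and its stubbed values are illustrative only.

// Illustrative stand-in for the safe-mode predicate in FSNamesystem; the
// surrounding fields and helpers are stubbed with fixed values for clarity.
public class SafeModeCheck {
  private final float threshold = 0.999f;   // 0 disables the block-safety check
  private final int datanodeThreshold = 0;  // 0 disables the live-datanode check
  private long blockSafe = 90;              // blocks reported safe so far
  private long blockThreshold = 100;        // blocks required to leave safe mode

  private int getNumLiveDataNodes() {
    return 0; // stub: a real namenode asks its datanode manager
  }

  private boolean nameNodeHasResourcesAvailable() {
    return true; // stub: a real namenode checks local disk space
  }

  /** True if the namenode should enter (or stay in) safe mode. */
  private boolean needEnter() {
    return (threshold != 0 && blockSafe < blockThreshold) ||
        (datanodeThreshold != 0 && getNumLiveDataNodes() < datanodeThreshold) ||
        (!nameNodeHasResourcesAvailable());
  }
}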