Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java?rev=697284&r1=697283&r2=697284&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/tools/DFSAdmin.java Fri Sep 19 16:40:54 2008
@@ -94,7 +94,7 @@
 
     @Override
     public void run(Path path) throws IOException {
-      dfs.clearQuota(path);
+      dfs.setQuota(path, FSConstants.QUOTA_RESET, FSConstants.QUOTA_DONT_SET);
     }
   }
 
@@ -141,7 +141,111 @@
 
     @Override
     public void run(Path path) throws IOException {
-      dfs.setQuota(path, quota);
+      dfs.setQuota(path, quota, FSConstants.QUOTA_DONT_SET);
+    }
+  }
+
+  /** A class that supports command clrSpaceQuota */
+  private static class ClearSpaceQuotaCommand extends DFSAdminCommand {
+    private static final String NAME = "clrSpaceQuota";
+    private static final String USAGE = "-"+NAME+" <dirname>...<dirname>";
+    private static final String DESCRIPTION = USAGE + ": " +
+    "\tClear the disk space quota for each directory <dirName>.\n" +
+    "\t\tBest effort for the directory, with faults reported if\n" +
+    "\t\t1. the directory does not exist or is a file, or\n" +
+    "\t\t2. user is not an administrator.\n" +
+    "\t\tIt does not fault if the directory has no quota.";
+
+    /** Constructor */
+    ClearSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
+      super(fs);
+      CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
+      List<String> parameters = c.parse(args, pos);
+      this.args = parameters.toArray(new String[parameters.size()]);
+    }
+
+    /** Check if a command is the clrSpaceQuota command
+     *
+     * @param cmd A string representation of a command starting with "-"
+     * @return true if this is a clrSpaceQuota command; false otherwise
+     */
+    public static boolean matches(String cmd) {
+      return ("-"+NAME).equals(cmd);
+    }
+
+    @Override
+    public String getCommandName() {
+      return NAME;
+    }
+
+    @Override
+    public void run(Path path) throws IOException {
+      dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_RESET);
+    }
+  }
+
+  /** A class that supports command setSpaceQuota */
+  private static class SetSpaceQuotaCommand extends DFSAdminCommand {
+    private static final String NAME = "setSpaceQuota";
+    private static final String USAGE =
+      "-"+NAME+" <quota> <dirname>...<dirname>";
+    private static final String DESCRIPTION = USAGE + ": " +
+      "\tSet the disk space quota <quota> for each directory <dirName>.\n" +
+      "\t\tThe space quota is a long integer that puts a hard limit " +
+      "on the total size of all the files under the directory tree.\n" +
+      "\t\tQuota can also be specified with MB, GB, or TB suffix" +
+      " (e.g. 100GB, 20TB).\n" +
+      "\t\tBest effort for the directory, with faults reported if\n" +
+      "\t\t1. N is not a positive integer, or\n" +
+      "\t\t2. user is not an administrator, or\n" +
+      "\t\t3. the directory does not exist or is a file, or\n" +
+      "\t\t4. the directory would immediately exceed the new space quota.";
+
+    private long quota; // the quota to be set
+
+    /** Constructor */
+    SetSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
+      super(fs);
+      CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
+      List<String> parameters = c.parse(args, pos);
+      long multiplier = 1;
+      String str = parameters.remove(0).trim();
+      if (str.endsWith("TB")) {
+        multiplier = 1024L * 1024 * 1024 * 1024;
+      } else if (str.endsWith("GB")) {
+        multiplier = 1024L * 1024 * 1024;
+      } else if (str.endsWith("MB")) {
+        multiplier = 1024L * 1024;
+      }
+      if (multiplier != 1) {
+        str = str.substring(0, str.length()-2);
+      }
+
+      quota = Long.parseLong(str);
+      if (quota > Long.MAX_VALUE/multiplier) {
+        throw new IllegalArgumentException("quota exceeds Long.MAX_VALUE!");
+      }
+      quota *= multiplier;
+      this.args = parameters.toArray(new String[parameters.size()]);
+    }
+
+    /** Check if a command is the setSpaceQuota command
+     *
+     * @param cmd A string representation of a command starting with "-"
+     * @return true if this is a setSpaceQuota command; false otherwise
+     */
+    public static boolean matches(String cmd) {
+      return ("-"+NAME).equals(cmd);
+    }
+
+    @Override
+    public String getCommandName() {
+      return NAME;
+    }
+
+    @Override
+    public void run(Path path) throws IOException {
+      dfs.setQuota(path, FSConstants.QUOTA_DONT_SET, quota);
     }
   }
 
@@ -293,6 +397,8 @@
       "\t[-refreshNodes]\n" +
       "\t[" + SetQuotaCommand.USAGE + "]\n" +
       "\t[" + ClearQuotaCommand.USAGE +"]\n" +
+      "\t[" + SetSpaceQuotaCommand.USAGE + "]\n" +
+      "\t[" + ClearSpaceQuotaCommand.USAGE +"]\n" +
       "\t[-help [cmd]]\n";
 
     String report ="-report: \tReports basic filesystem information and statistics.\n";
@@ -354,6 +460,10 @@
       System.out.println(SetQuotaCommand.DESCRIPTION);
     } else if (ClearQuotaCommand.matches(cmd)) {
       System.out.println(ClearQuotaCommand.DESCRIPTION);
+    } else if (SetSpaceQuotaCommand.matches(cmd)) {
+      System.out.println(SetSpaceQuotaCommand.DESCRIPTION);
+    } else if (ClearSpaceQuotaCommand.matches(cmd)) {
+      System.out.println(ClearSpaceQuotaCommand.DESCRIPTION);
     } else if ("help".equals(cmd)) {
       System.out.println(help);
     } else {
@@ -366,6 +476,8 @@
       System.out.println(metaSave);
       System.out.println(SetQuotaCommand.DESCRIPTION);
       System.out.println(ClearQuotaCommand.DESCRIPTION);
+      System.out.println(SetSpaceQuotaCommand.DESCRIPTION);
+      System.out.println(ClearSpaceQuotaCommand.DESCRIPTION);
       System.out.println(help);
       System.out.println();
       ToolRunner.printGenericCommandUsage(System.out);
@@ -478,6 +590,12 @@
     } else if (ClearQuotaCommand.matches(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " ["+ClearQuotaCommand.USAGE+"]");
+    } else if (SetSpaceQuotaCommand.matches(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [" + SetSpaceQuotaCommand.USAGE+"]");
+    } else if (ClearSpaceQuotaCommand.matches(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " ["+ClearSpaceQuotaCommand.USAGE+"]");
     } else {
      System.err.println("Usage: java DFSAdmin");
      System.err.println("           [-report]");
@@ -488,6 +606,8 @@
      System.err.println("           [-metasave filename]");
      System.err.println("           ["+SetQuotaCommand.USAGE+"]");
      System.err.println("           ["+ClearQuotaCommand.USAGE+"]");
+     System.err.println("           ["+SetSpaceQuotaCommand.USAGE+"]");
+     System.err.println("           ["+ClearSpaceQuotaCommand.USAGE+"]");
      System.err.println("           [-help [cmd]]");
      System.err.println();
      ToolRunner.printGenericCommandUsage(System.err);
@@ -576,6 +696,10 @@
       exitCode = new ClearQuotaCommand(argv, i, fs).runAll();
     } else if (SetQuotaCommand.matches(cmd)) {
       exitCode = new SetQuotaCommand(argv, i, fs).runAll();
+    } else if (ClearSpaceQuotaCommand.matches(cmd)) {
+      exitCode = new ClearSpaceQuotaCommand(argv, i, fs).runAll();
+    } else if (SetSpaceQuotaCommand.matches(cmd)) {
+      exitCode = new SetSpaceQuotaCommand(argv, i, fs).runAll();
     } else if ("-help".equals(cmd)) {
       if (i < argv.length) {
         printHelp(argv[i]);
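
For readers following the patch: all four quota subcommands above funnel into the same
three-argument DistributedFileSystem.setQuota(path, namespaceQuota, diskspaceQuota) call,
with FSConstants.QUOTA_DONT_SET leaving one dimension untouched and FSConstants.QUOTA_RESET
clearing it. The sketch below illustrates that mapping; it is not part of the patch, and the
directory name and the 1 GB figure are made up for illustration.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.hdfs.DistributedFileSystem;
  import org.apache.hadoop.hdfs.protocol.FSConstants;

  public class QuotaApiSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // assumes fs.default.name points at an HDFS cluster
      DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
      Path dir = new Path("/user/example");            // hypothetical directory

      // -setQuota 100 <dir>: namespace quota only
      dfs.setQuota(dir, 100, FSConstants.QUOTA_DONT_SET);

      // -setSpaceQuota 1GB <dir>: disk space quota only
      dfs.setQuota(dir, FSConstants.QUOTA_DONT_SET, 1024L * 1024 * 1024);

      // -clrQuota <dir> and -clrSpaceQuota <dir>: reset one dimension each
      dfs.setQuota(dir, FSConstants.QUOTA_RESET, FSConstants.QUOTA_DONT_SET);
      dfs.setQuota(dir, FSConstants.QUOTA_DONT_SET, FSConstants.QUOTA_RESET);
    }
  }
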
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java?rev=697284&r1=697283&r2=697284&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/DFSTestUtil.java Fri Sep 19 16:40:54 2008
@@ -24,9 +24,9 @@
 import java.io.IOException;
 import java.util.Random;
 import junit.framework.TestCase;
-import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -124,18 +124,24 @@
       throw new IOException("Mkdirs failed to create " +
                             fileName.getParent().toString());
     }
-    FSDataOutputStream out = fs.create(fileName, replFactor);
-    byte[] toWrite = new byte[1024];
-    Random rb = new Random(seed);
-    long bytesToWrite = fileLen;
-    while (bytesToWrite>0) {
-      rb.nextBytes(toWrite);
-      int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
+    FSDataOutputStream out = null;
+    try {
+      out = fs.create(fileName, replFactor);
+      byte[] toWrite = new byte[1024];
+      Random rb = new Random(seed);
+      long bytesToWrite = fileLen;
+      while (bytesToWrite>0) {
+        rb.nextBytes(toWrite);
+        int bytesToWriteNext = (1024<bytesToWrite)?1024:(int)bytesToWrite;
-      out.write(toWrite, 0, bytesToWriteNext);
-      bytesToWrite -= bytesToWriteNext;
+        out.write(toWrite, 0, bytesToWriteNext);
+        bytesToWrite -= bytesToWriteNext;
+      }
+      out.close();
+      out = null;
+    } finally {
+      IOUtils.closeStream(out);
     }
-    out.close();
   }
 
   /** check if the files have been copied correctly. */
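
The DFSTestUtil change above matters for the space quota tests that follow: once disk space
quotas exist, a write can fail partway through with QuotaExceededException, and the helper
must not leak the half-written stream. A generic sketch of the same close-on-error idiom,
assuming only java.io and org.apache.hadoop.io.IOUtils (the class and method names here are
made up):

  import java.io.IOException;
  import java.io.OutputStream;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.IOUtils;

  class StreamCloseSketch {
    /** Write len zero bytes to path, closing the stream even if the write throws. */
    static void writeSafely(FileSystem fs, Path path, int len) throws IOException {
      OutputStream out = null;
      try {
        out = fs.create(path);
        out.write(new byte[len]);  // may throw, e.g. a quota violation
        out.close();               // normal close; surfaces close-time errors
        out = null;                // mark that cleanup is no longer needed
      } finally {
        IOUtils.closeStream(out);  // closes quietly only if we never reached close()
      }
    }
  }
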
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestQuota.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestQuota.java?rev=697284&r1=697283&r2=697284&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestQuota.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestQuota.java Fri Sep 19 16:40:54 2008
@@ -23,14 +23,22 @@
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.security.UnixUserGroupInformation;
 
 import junit.framework.TestCase;
 
 /** A class for testing quota-related commands */
 public class TestQuota extends TestCase {
+
+  private void runCommand(DFSAdmin admin, boolean expectError, String... args)
+                         throws Exception {
+    runCommand(admin, args, expectError);
+  }
+
   private void runCommand(DFSAdmin admin, String args[], boolean expectEror)
                          throws Exception {
     int val = admin.run(args);
@@ -41,9 +49,14 @@
     }
   }
 
-  /** Test quota related commands: setQuota, clrQuota, and count */
+  /** Test quota related commands:
+   *    setQuota, clrQuota, setSpaceQuota, clrSpaceQuota, and count
+   */
   public void testQuotaCommands() throws Exception {
     final Configuration conf = new Configuration();
+    // set a smaller block size so that we can test with smaller
+    // space quotas
+    conf.set("dfs.block.size", "512");
     final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
@@ -52,11 +65,19 @@
     DFSAdmin admin = new DFSAdmin(conf);
 
     try {
+      final int fileLen = 1024;
+      final short replication = 5;
+      final long spaceQuota = fileLen * replication * 15 / 8;
+
       // 1: create a directory /test and set its quota to be 3
       final Path parent = new Path("/test");
       assertTrue(dfs.mkdirs(parent));
       String[] args = new String[]{"-setQuota", "3", parent.toString()};
       runCommand(admin, args, false);
+
+      // set the disk space quota of /test to spaceQuota
+      runCommand(admin, false, "-setSpaceQuota",
+                 Long.toString(spaceQuota), parent.toString());
 
       // 2: create directory /test/data0
       final Path childDir0 = new Path(parent, "data0");
@@ -64,18 +85,22 @@
       // 3: create a file /test/datafile0
       final Path childFile0 = new Path(parent, "datafile0");
-      OutputStream fout = dfs.create(childFile0);
-      fout.close();
+      DFSTestUtil.createFile(fs, childFile0, fileLen, replication, 0);
 
       // 4: count -q /test
       ContentSummary c = dfs.getContentSummary(parent);
       assertEquals(c.getFileCount()+c.getDirectoryCount(), 3);
       assertEquals(c.getQuota(), 3);
+      assertEquals(c.getSpaceConsumed(), fileLen*replication);
+      assertEquals(c.getSpaceQuota(), spaceQuota);
 
       // 5: count -q /test/data0
       c = dfs.getContentSummary(childDir0);
       assertEquals(c.getFileCount()+c.getDirectoryCount(), 1);
       assertEquals(c.getQuota(), -1);
+      // check disk space consumed
+      c = dfs.getContentSummary(parent);
+      assertEquals(c.getSpaceConsumed(), fileLen*replication);
 
       // 6: create a directory /test/data1
       final Path childDir1 = new Path(parent, "data1");
@@ -87,6 +112,8 @@
       }
       assertTrue(hasException);
 
+      OutputStream fout;
+
       // 7: create a file /test/datafile1
       final Path childFile1 = new Path(parent, "datafile1");
       hasException = false;
@@ -101,6 +128,7 @@
       runCommand(admin, new String[]{"-clrQuota", parent.toString()}, false);
       c = dfs.getContentSummary(parent);
       assertEquals(c.getQuota(), -1);
+      assertEquals(c.getSpaceQuota(), spaceQuota);
 
       // 9: clear quota /test/data0
       runCommand(admin, new String[]{"-clrQuota", childDir0.toString()}, false);
@@ -108,12 +136,36 @@
       assertEquals(c.getQuota(), -1);
 
       // 10: create a file /test/datafile1
-      fout = dfs.create(childFile1);
-      fout.close();
+      fout = dfs.create(childFile1, replication);
+
+      // 10.s: but writing fileLen bytes should result in a quota exception
+      hasException = false;
+      try {
+        fout.write(new byte[fileLen]);
+        fout.close();
+      } catch (QuotaExceededException e) {
+        hasException = true;
+        IOUtils.closeStream(fout);
+      }
+      assertTrue(hasException);
+
+      // delete the file
+      dfs.delete(childFile1, false);
+
+      // 9.s: clear the disk space quota
+      runCommand(admin, false, "-clrSpaceQuota", parent.toString());
+      c = dfs.getContentSummary(parent);
+      assertEquals(c.getQuota(), -1);
+      assertEquals(c.getSpaceQuota(), -1);
+
+      // now creating childFile1 should succeed
+      DFSTestUtil.createFile(dfs, childFile1, fileLen, replication, 0);
 
       // 11: set the quota of /test to be 1
       args = new String[]{"-setQuota", "1", parent.toString()};
       runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", // for space quota
+                 Integer.toString(fileLen), args[2]);
 
       // 12: set the quota of /test/data0 to be 1
       args = new String[]{"-setQuota", "1", childDir0.toString()};
@@ -136,35 +188,49 @@
       assertFalse(dfs.exists(nonExistentPath));
       args = new String[]{"-setQuota", "1", nonExistentPath.toString()};
       runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", "1GB", // for space quota
+                 nonExistentPath.toString());
 
       // 14b: set quota on a file
       assertTrue(dfs.isFile(childFile0));
       args[1] = childFile0.toString();
       runCommand(admin, args, true);
+      // same for space quota
+      runCommand(admin, true, "-setSpaceQuota", "1GB", args[1]);
 
       // 15a: clear quota on a file
       args[0] = "-clrQuota";
       runCommand(admin, args, true);
+      runCommand(admin, true, "-clrSpaceQuota", args[1]);
 
       // 15b: clear quota on a non-existent directory
       args[1] = nonExistentPath.toString();
       runCommand(admin, args, true);
-
+      runCommand(admin, true, "-clrSpaceQuota", args[1]);
+
       // 16a: set the quota of /test to be 0
       args = new String[]{"-setQuota", "0", parent.toString()};
       runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", "0", args[2]);
 
       // 16b: set the quota of /test to be -1
       args[1] = "-1";
       runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
 
       // 16c: set the quota of /test to be Long.MAX_VALUE+1
       args[1] = String.valueOf(Long.MAX_VALUE+1L);
       runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
 
       // 16d: set the quota of /test to be a non integer
      args[1] = "33aa1.5";
       runCommand(admin, args, true);
+      runCommand(admin, true, "-setSpaceQuota", args[1], args[2]);
+
+      // 16e: set space quota with a value larger than Long.MAX_VALUE
+      runCommand(admin, true, "-setSpaceQuota",
+                 (Long.MAX_VALUE/1024/1024 + 1024) + "TB", args[2]);
 
       // 17: setQuota by a non-administrator
       UnixUserGroupInformation.saveToConf(conf,
@@ -173,10 +239,12 @@
       DFSAdmin userAdmin = new DFSAdmin(conf);
       args[1] = "100";
       runCommand(userAdmin, args, true);
+      runCommand(userAdmin, true, "-setSpaceQuota", "1GB", args[2]);
 
       // 18: clrQuota by a non-administrator
       args = new String[] {"-clrQuota", parent.toString()};
       runCommand(userAdmin, args, true);
+      runCommand(userAdmin, true, "-clrSpaceQuota", args[1]);
     } finally {
       cluster.shutdown();
     }
@@ -198,14 +266,14 @@
 
       // 2: set the quota of /nqdir0/qdir1 to be 6
       final Path quotaDir1 = new Path("/nqdir0/qdir1");
-      dfs.setQuota(quotaDir1, 6);
+      dfs.setQuota(quotaDir1, 6, FSConstants.QUOTA_DONT_SET);
       ContentSummary c = dfs.getContentSummary(quotaDir1);
       assertEquals(c.getDirectoryCount(), 3);
       assertEquals(c.getQuota(), 6);
 
       // 3: set the quota of /nqdir0/qdir1/qdir20 to be 7
       final Path quotaDir2 = new Path("/nqdir0/qdir1/qdir20");
-      dfs.setQuota(quotaDir2, 7);
+      dfs.setQuota(quotaDir2, 7, FSConstants.QUOTA_DONT_SET);
       c = dfs.getContentSummary(quotaDir2);
       assertEquals(c.getDirectoryCount(), 2);
       assertEquals(c.getQuota(), 7);
@@ -213,7 +281,7 @@
       // 4: Create directory /nqdir0/qdir1/qdir21 and set its quota to 2
       final Path quotaDir3 = new Path("/nqdir0/qdir1/qdir21");
       assertTrue(dfs.mkdirs(quotaDir3));
-      dfs.setQuota(quotaDir3, 2);
+      dfs.setQuota(quotaDir3, 2, FSConstants.QUOTA_DONT_SET);
       c = dfs.getContentSummary(quotaDir3);
       assertEquals(c.getDirectoryCount(), 1);
       assertEquals(c.getQuota(), 2);
@@ -345,4 +413,202 @@
       cluster.shutdown();
     }
   }
+
+  /**
+   * Test HDFS operations that change disk space consumed by a directory tree,
+   * namely create, rename, delete, append, and setReplication.
+   *
+   * This is based on testNamespaceCommands() above.
+   */
+  public void testSpaceCommands() throws Exception {
+    final Configuration conf = new Configuration();
+    // set a smaller block size so that we can test with smaller
+    // disk space quotas
+    conf.set("dfs.block.size", "512");
+    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    final FileSystem fs = cluster.getFileSystem();
+    assertTrue("Not a HDFS: "+fs.getUri(),
+                fs instanceof DistributedFileSystem);
+    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
+
+    try {
+      int fileLen = 1024;
+      short replication = 3;
+      int fileSpace = fileLen * replication;
+
+      // create directory /nqdir0/qdir1/qdir20/nqdir30
+      assertTrue(dfs.mkdirs(new Path("/nqdir0/qdir1/qdir20/nqdir30")));
+
+      // set the quota of /nqdir0/qdir1 to 4 * fileSpace
+      final Path quotaDir1 = new Path("/nqdir0/qdir1");
+      dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 4 * fileSpace);
+      ContentSummary c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getSpaceQuota(), 4 * fileSpace);
+
+      // set the quota of /nqdir0/qdir1/qdir20 to 6 * fileSpace
+      final Path quotaDir20 = new Path("/nqdir0/qdir1/qdir20");
+      dfs.setQuota(quotaDir20, FSConstants.QUOTA_DONT_SET, 6 * fileSpace);
+      c = dfs.getContentSummary(quotaDir20);
+      assertEquals(c.getSpaceQuota(), 6 * fileSpace);
+
+      // Create /nqdir0/qdir1/qdir21 and set its space quota to 2 * fileSpace
+      final Path quotaDir21 = new Path("/nqdir0/qdir1/qdir21");
+      assertTrue(dfs.mkdirs(quotaDir21));
+      dfs.setQuota(quotaDir21, FSConstants.QUOTA_DONT_SET, 2 * fileSpace);
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceQuota(), 2 * fileSpace);
+
+      // 5: Create directory /nqdir0/qdir1/qdir21/nqdir32
+      Path tempPath = new Path(quotaDir21, "nqdir32");
+      assertTrue(dfs.mkdirs(tempPath));
+
+      // create a file under nqdir32/fileDir
+      DFSTestUtil.createFile(dfs, new Path(tempPath, "fileDir/file1"), fileLen,
+                             replication, 0);
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceConsumed(), fileSpace);
+
+      // Create a larger file /nqdir0/qdir1/qdir21/nqdir33/
+      boolean hasException = false;
+      try {
+        DFSTestUtil.createFile(dfs, new Path(quotaDir21, "nqdir33/file2"),
+                               2*fileLen, replication, 0);
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+      // delete nqdir33
+      assertTrue(dfs.delete(new Path(quotaDir21, "nqdir33"), true));
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceConsumed(), fileSpace);
+      assertEquals(c.getSpaceQuota(), 2*fileSpace);
+
+      // Verify space before the move:
+      c = dfs.getContentSummary(quotaDir20);
+      assertEquals(c.getSpaceConsumed(), 0);
+
+      // Move /nqdir0/qdir1/qdir21/nqdir32 to /nqdir0/qdir1/qdir20/nqdir30
+      Path dstPath = new Path(quotaDir20, "nqdir30");
+      Path srcPath = new Path(quotaDir21, "nqdir32");
+      assertTrue(dfs.rename(srcPath, dstPath));
+
+      // verify space after the move
+      c = dfs.getContentSummary(quotaDir20);
+      assertEquals(c.getSpaceConsumed(), fileSpace);
+      // verify space for its parent
+      c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getSpaceConsumed(), fileSpace);
+      // verify space for the source of the move
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceConsumed(), 0);
+
+      final Path file2 = new Path(dstPath, "fileDir/file2");
+      int file2Len = 2 * fileLen;
+      // create a larger file under /nqdir0/qdir1/qdir20/nqdir30
+      DFSTestUtil.createFile(dfs, file2, file2Len, replication, 0);
+
+      c = dfs.getContentSummary(quotaDir20);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceConsumed(), 0);
+
+      // Reverse: Move /nqdir0/qdir1/qdir20/nqdir30 to /nqdir0/qdir1/qdir21/
+      hasException = false;
+      try {
+        assertFalse(dfs.rename(dstPath, srcPath));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+
+      // make sure no intermediate directories left by failed rename
+      assertFalse(dfs.exists(srcPath));
+      // directory should exist
+      assertTrue(dfs.exists(dstPath));
+
+      // verify space after the failed move
+      c = dfs.getContentSummary(quotaDir20);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
+      c = dfs.getContentSummary(quotaDir21);
+      assertEquals(c.getSpaceConsumed(), 0);
+
+      // Test Append :
+
+      // verify space quota
+      c = dfs.getContentSummary(quotaDir1);
+      assertEquals(c.getSpaceQuota(), 4 * fileSpace);
+
+      // verify space before append;
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 3 * fileSpace);
+
+      OutputStream out = dfs.append(file2);
+      // appending 1 fileLen should succeed
+      out.write(new byte[fileLen]);
+      out.close();
+
+      file2Len += fileLen; // after append
+
+      // verify space after append;
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 4 * fileSpace);
+
+      // now increase the quota for quotaDir1
+      dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 5 * fileSpace);
+      // Now, appending more than 1 fileLen should result in an error
+      out = dfs.append(file2);
+      hasException = false;
+      try {
+        out.write(new byte[fileLen + 1024]);
+        out.flush();
+        out.close();
+      } catch (QuotaExceededException e) {
+        hasException = true;
+        IOUtils.closeStream(out);
+      }
+      assertTrue(hasException);
+
+      file2Len += fileLen; // after partial append
+
+      // verify space after partial append
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace);
+
+      // Test set replication :
+
+      // first reduce the replication
+      dfs.setReplication(file2, (short)(replication-1));
+
+      // verify that space is reduced by file2Len
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
+
+      // now try to increase the replication and expect an error.
+      hasException = false;
+      try {
+        dfs.setReplication(file2, (short)(replication+1));
+      } catch (QuotaExceededException e) {
+        hasException = true;
+      }
+      assertTrue(hasException);
+
+      // verify space consumed remains unchanged.
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace - file2Len);
+
+      // now increase the quota for quotaDir1 and quotaDir20
+      dfs.setQuota(quotaDir1, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
+      dfs.setQuota(quotaDir20, FSConstants.QUOTA_DONT_SET, 10 * fileSpace);
+
+      // then increasing replication should be ok.
+      dfs.setReplication(file2, (short)(replication+1));
+      // verify increase in space
+      c = dfs.getContentSummary(dstPath);
+      assertEquals(c.getSpaceConsumed(), 5 * fileSpace + file2Len);
+
+    } finally {
+      cluster.shutdown();
+    }
+  }
 }
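
As a sanity check on the arithmetic these tests rely on: the content summary accounts disk
space as bytes written times replication factor, which is why the assertions above compare
getSpaceConsumed() against fileLen*replication. A standalone sketch of the expected values,
runnable without a cluster (the class name is made up):

  public class SpaceQuotaMath {
    public static void main(String[] args) {
      // constants from testSpaceCommands()
      int fileLen = 1024;
      short replication = 3;
      int fileSpace = fileLen * replication;

      // after file1 (fileLen) and file2 (2*fileLen) land under nqdir30
      int consumed = fileLen * replication + 2 * fileLen * replication;
      System.out.println(consumed == 3 * fileSpace);   // true

      // testQuotaCommands() sizes its space quota so that one 1024-byte file at
      // replication 5 fits, but a second identical file would not
      long spaceQuota = 1024L * 5 * 15 / 8;            // 9600 bytes
      System.out.println(1024 * 5 <= spaceQuota);      // true: first file fits
      System.out.println(2 * 1024 * 5 <= spaceQuota);  // false: second file trips the quota
    }
  }
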
