Author: kihwal
Date: Mon Apr 7 18:25:12 2014
New Revision: 1585544

URL: http://svn.apache.org/r1585544
Log:
HDFS-6191. Disable quota checks when replaying edit log.
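In brief, the change adds a volatile skipQuotaCheck flag to FSDirectory. The flag is raised on nodes that only replay edits (the standby NameNode's edit-log tailer, the BackupNode, and the SecondaryNameNode's checkpoint namesystem) and lowered again when a namesystem starts active services, since every replayed operation was already validated against quotas by the active NameNode that first applied it. Below is a simplified sketch of that gating pattern; the field and method names mirror the patch, but the class itself is a stand-in and omits everything else FSDirectory does.

    // Simplified stand-in for FSDirectory's quota gating; not the real class.
    class QuotaGate {
      // volatile: flipped by the state-transition code, read by the threads
      // that apply namespace operations, with no extra locking
      private volatile boolean skipQuotaCheck = false;

      void enableQuotaChecks()  { skipQuotaCheck = false; } // becoming active
      void disableQuotaChecks() { skipQuotaCheck = true;  } // replaying edits

      void updateCount(boolean checkQuota, long nsDelta, long dsDelta) {
        if (checkQuota && !skipQuotaCheck) {
          // the real code calls verifyQuota(...) here, which may throw
          // QuotaExceededException back to the caller
        }
        // usage counts are still updated unconditionally
      }
    }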
Modified:
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
    hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1585544&r1=1585543&r2=1585544&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Mon Apr 7 18:25:12 2014
@@ -283,6 +283,8 @@ Release 2.5.0 - UNRELEASED
     HDFS-6167. Relocate the non-public API classes in the hdfs.client package. (szetszwo)
 
+    HDFS-6191. Disable quota checks when replaying edit log. (kihwal)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java?rev=1585544&r1=1585543&r2=1585544&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java Mon Apr 7 18:25:12 2014
@@ -134,6 +134,7 @@ public class BackupNode extends NameNode
         BN_SAFEMODE_EXTENSION_DEFAULT);
     BackupImage bnImage = new BackupImage(conf);
     this.namesystem = new FSNamesystem(conf, bnImage);
+    namesystem.dir.disableQuotaChecks();
     bnImage.setNamesystem(namesystem);
     bnImage.recoverCreateRead();
   }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java?rev=1585544&r1=1585543&r2=1585544&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java Mon Apr 7 18:25:12 2014
@@ -117,6 +117,7 @@ public class FSDirectory implements Clos
   FSImage fsImage;
   private final FSNamesystem namesystem;
   private volatile boolean ready = false;
+  private volatile boolean skipQuotaCheck = false; //skip while consuming edits
   private final int maxComponentLength;
   private final int maxDirItems;
   private final int lsLimit; // max list limit
@@ -283,6 +284,16 @@ public class FSDirectory implements Clos
     }
   }
 
+  /** Enable quota verification */
+  void enableQuotaChecks() {
+    skipQuotaCheck = false;
+  }
+
+  /** Disable quota verification */
+  void disableQuotaChecks() {
+    skipQuotaCheck = true;
+  }
+
   /**
    * Add the given filename to the fs.
    * @throws FileAlreadyExistsException
@@ -1825,7 +1836,7 @@ public class FSDirectory implements Clos
     if (numOfINodes > inodes.length) {
       numOfINodes = inodes.length;
     }
-    if (checkQuota) {
+    if (checkQuota && !skipQuotaCheck) {
       verifyQuota(inodes, numOfINodes, nsDelta, dsDelta, null);
     }
     unprotectedUpdateCount(iip, numOfINodes, nsDelta, dsDelta);
@@ -2117,7 +2128,7 @@ public class FSDirectory implements Clos
    */
  private void verifyQuotaForRename(INode[] src, INode[] dst)
      throws QuotaExceededException {
-    if (!ready) {
+    if (!ready || skipQuotaCheck) {
       // Do not check quota if edits log is still being processed
       return;
     }

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java?rev=1585544&r1=1585543&r2=1585544&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java Mon Apr 7 18:25:12 2014
@@ -1033,7 +1033,9 @@ public class FSNamesystem implements Nam
         dir.fsImage.editLog.openForWrite();
       }
-      
+
+      // Enable quota checks.
+      dir.enableQuotaChecks();
       if (haEnabled) {
         // Renew all of the leases before becoming active.
         // This is because, while we were in standby mode,
@@ -1140,6 +1142,8 @@ public class FSNamesystem implements Nam
 
     blockManager.setPostponeBlocksFromFuture(true);
 
+    // Disable quota checks while in standby.
+    dir.disableQuotaChecks();
     editLogTailer = new EditLogTailer(this, conf);
     editLogTailer.start();
     if (standbyShouldCheckpoint) {

Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java?rev=1585544&r1=1585543&r2=1585544&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java Mon Apr 7 18:25:12 2014
@@ -248,6 +248,9 @@ public class SecondaryNameNode implement
 
     namesystem = new FSNamesystem(conf, checkpointImage, true);
 
+    // Disable quota checks
+    namesystem.dir.disableQuotaChecks();
+
     // Initialize other scheduling parameters from the configuration
     checkpointConf = new CheckpointConf(conf);
 
@@ -850,7 +853,7 @@ public class SecondaryNameNode implement
         Collection<URI> imageDirs,
         List<URI> editsDirs) throws IOException {
       super(conf, imageDirs, editsDirs);
-      
+
       // the 2NN never writes edits -- it only downloads them. So
       // we shouldn't have any editLog instance. Setting to null
       // makes sure we don't accidentally depend on it.
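Taken together, the FSNamesystem, BackupNode and SecondaryNameNode hunks above tie the flag to the node's role. The reduction below is only an illustration of those call sites, not verbatim FSNamesystem code; it reuses the QuotaGate stand-in from the earlier sketch in place of FSDirectory and elides the surrounding start-up logic.

    // Illustrative reduction of the call sites above; not the real FSNamesystem.
    class NamesystemSketch {
      private final QuotaGate dir = new QuotaGate(); // stand-in for FSDirectory

      void startActiveServices() {
        // active again: new client operations must be checked against quotas
        dir.enableQuotaChecks();
      }

      void startStandbyServices() {
        // the standby only replays edits the active NameNode already accepted,
        // so enforcing quotas here could only reject valid edits
        dir.disableQuotaChecks();
      }
    }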
Modified: hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
URL: http://svn.apache.org/viewvc/hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java?rev=1585544&r1=1585543&r2=1585544&view=diff
==============================================================================
--- hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java (original)
+++ hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java Mon Apr 7 18:25:12 2014
@@ -20,12 +20,15 @@ package org.apache.hadoop.hdfs.server.na
 
 import java.io.BufferedReader;
+import java.io.IOException;
 import java.io.StringReader;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -55,6 +58,7 @@ public class TestFSDirectory {
   private final Path file5 = new Path(sub1, "z_file5");
 
   private final Path sub2 = new Path(dir, "sub2");
+  private final Path file6 = new Path(sub2, "file6");
 
   private Configuration conf;
   private MiniDFSCluster cluster;
@@ -124,6 +128,41 @@ public class TestFSDirectory {
     fsdir.imageLoadComplete();
     Assert.assertTrue(fsdir.isReady());
   }
+
+  @Test
+  public void testSkipQuotaCheck() throws Exception {
+    try {
+      // set quota. nsQuota of 1 means no files can be created
+      // under this directory.
+      hdfs.setQuota(sub2, 1, Long.MAX_VALUE);
+
+      // create a file
+      try {
+        // this should fail
+        DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
+        throw new IOException("The create should have failed.");
+      } catch (NSQuotaExceededException qe) {
+        // ignored
+      }
+      // disable the quota check and retry. this should succeed.
+      fsdir.disableQuotaChecks();
+      DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
+
+      // trying again after re-enabling the check.
+      hdfs.delete(file6, false); // cleanup
+      fsdir.enableQuotaChecks();
+      try {
+        // this should fail
+        DFSTestUtil.createFile(hdfs, file6, 1024, REPLICATION, seed);
+        throw new IOException("The create should have failed.");
+      } catch (NSQuotaExceededException qe) {
+        // ignored
+      }
+    } finally {
+      hdfs.delete(file6, false); // cleanup, in case the test failed in the middle.
+      hdfs.setQuota(sub2, Long.MAX_VALUE, Long.MAX_VALUE);
+    }
+  }
 
   static void checkClassName(String line) {
     int i = line.lastIndexOf('(
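The new test relies on the fact that a namespace quota of 1 is consumed by the directory itself, so any child creation fails with NSQuotaExceededException until the check is bypassed via FSDirectory. For reference, a stand-alone client-side version of that quota behaviour might look like the sketch below; the /quotaDemo path is made up, and it assumes fs.defaultFS points at a running HDFS cluster (the test instead uses MiniDFSCluster and reaches into FSDirectory directly).

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;

    public class QuotaDemo {
      public static void main(String[] args) throws Exception {
        DistributedFileSystem dfs =
            (DistributedFileSystem) FileSystem.get(new Configuration());
        Path dir = new Path("/quotaDemo");
        dfs.mkdirs(dir);
        // nsQuota of 1 is used up by the directory itself, so no child fits
        dfs.setQuota(dir, 1, Long.MAX_VALUE);
        try {
          dfs.create(new Path(dir, "file")).close();
        } catch (NSQuotaExceededException e) {
          // expected: the NameNode enforces the namespace quota on create
          System.out.println("quota enforced: " + e.getMessage());
        }
      }
    }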