Author: wang
Date: Thu Aug 15 00:15:11 2013
New Revision: 1514105

URL: http://svn.apache.org/r1514105
Log:
merge trunk into HDFS-4949 branch
Added:
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/
      - copied from r1514104, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
      - copied unchanged from r1514104, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java
Modified:
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/native/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/   (props changed)
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1512448-1514104

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Thu Aug 15 00:15:11 2013
@@ -265,6 +265,11 @@ Release 2.3.0 - UNRELEASED
     HDFS-5035. getFileLinkStatus and rename do not correctly check permissions
     of symlinks. (Andrew Wang via Colin Patrick McCabe)
 
+    HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
+
+    HDFS-4816. transitionToActive blocks if the SBN is doing checkpoint image
+    transfer. (Andrew Wang)
+
 Release 2.1.1-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -304,6 +309,12 @@ Release 2.1.1-beta - UNRELEASED
     HDFS-5043. For HdfsFileStatus, set default value of childrenNum to -1
     instead of 0 to avoid confusing applications. (brandonli)
 
+    HDFS-4993. Fsck can fail if a file is renamed or deleted. (Robert Parker
+    via kihwal)
+
+    HDFS-5091. Support for spnego keytab separate from the JournalNode keytab
+    for secure HA. (jing9)
+
 Release 2.1.0-beta - 2013-08-06
 
   INCOMPATIBLE CHANGES

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/pom.xml Thu Aug 15 00:15:11 2013
@@ -417,6 +417,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
              <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>
@@ -441,6 +443,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
              <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>
@@ -462,6 +466,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
              <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>
@@ -483,6 +489,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
              <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml Thu Aug 15 00:15:11 2013
@@ -103,6 +103,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
              <imports>
                 <param>${basedir}/../../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto</param>

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1512448-1514104

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java Thu Aug 15 00:15:11 2013
@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFac
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.net.NetUtils;
@@ -74,7 +75,7 @@ public class JournalNodeHttpServer {
     {
       if (UserGroupInformation.isSecurityEnabled()) {
         initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
-            DFS_JOURNALNODE_KEYTAB_FILE_KEY);
+            DFSUtil.getSpnegoKeytabKey(conf, DFS_JOURNALNODE_KEYTAB_FILE_KEY));
       }
     }
   };

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java Thu Aug 15 00:15:11 2013
@@ -142,7 +142,7 @@ public class NamenodeFsck {
   /**
    * Filesystem checker.
    * @param conf configuration (namenode config)
-   * @param nn namenode that this fsck is going to use
+   * @param namenode namenode that this fsck is going to use
    * @param pmap key=value[] map passed to the http servlet as url parameters
    * @param out output stream to write the fsck output
    * @param totalDatanodes number of live datanodes
@@ -302,8 +302,13 @@ public class NamenodeFsck {
     long fileLen = file.getLen();
     // Get block locations without updating the file access time
     // and without block access tokens
-    LocatedBlocks blocks = namenode.getNamesystem().getBlockLocations(path, 0,
-        fileLen, false, false, false);
+    LocatedBlocks blocks;
+    try {
+      blocks = namenode.getNamesystem().getBlockLocations(path, 0,
+          fileLen, false, false, false);
+    } catch (FileNotFoundException fnfe) {
+      blocks = null;
+    }
     if (blocks == null) { // the file is deleted
       return;
     }

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java Thu Aug 15 00:15:11 2013
@@ -17,9 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.ha;
 
+import static org.apache.hadoop.util.Time.now;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedAction;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -38,10 +46,10 @@ import org.apache.hadoop.hdfs.util.Cance
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import static org.apache.hadoop.util.Time.now;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
 
 /**
  * Thread which runs inside the NN when it's in Standby state,
@@ -57,6 +65,7 @@ public class StandbyCheckpointer {
   private final FSNamesystem namesystem;
   private long lastCheckpointTime;
   private final CheckpointerThread thread;
+  private final ThreadFactory uploadThreadFactory;
   private String activeNNAddress;
   private InetSocketAddress myNNAddress;
 
@@ -72,6 +81,8 @@ public class StandbyCheckpointer {
     this.namesystem = ns;
     this.checkpointConf = new CheckpointConf(conf);
     this.thread = new CheckpointerThread();
+    this.uploadThreadFactory = new ThreadFactoryBuilder().setDaemon(true)
+        .setNameFormat("TransferFsImageUpload-%d").build();
 
     setNameNodeAddresses(conf);
   }
@@ -142,7 +153,7 @@ public class StandbyCheckpointer {
 
   private void doCheckpoint() throws InterruptedException, IOException {
     assert canceler != null;
-    long txid;
+    final long txid;
 
     namesystem.writeLockInterruptibly();
     try {
@@ -171,9 +182,26 @@ public class StandbyCheckpointer {
     }
 
     // Upload the saved checkpoint back to the active
-    TransferFsImage.uploadImageFromStorage(
-        activeNNAddress, myNNAddress,
-        namesystem.getFSImage().getStorage(), txid);
+    // Do this in a separate thread to avoid blocking transition to active
+    // See HDFS-4816
+    ExecutorService executor =
+        Executors.newSingleThreadExecutor(uploadThreadFactory);
+    Future<Void> upload = executor.submit(new Callable<Void>() {
+      @Override
+      public Void call() throws IOException {
+        TransferFsImage.uploadImageFromStorage(
+            activeNNAddress, myNNAddress,
+            namesystem.getFSImage().getStorage(), txid);
+        return null;
+      }
+    });
+    executor.shutdown();
+    try {
+      upload.get();
+    } catch (ExecutionException e) {
+      throw new IOException("Exception during image upload: " + e.getMessage(),
+          e.getCause());
+    }
   }
 
   /**
@@ -301,6 +329,7 @@ public class StandbyCheckpointer {
           LOG.info("Checkpoint was cancelled: " + ce.getMessage());
           canceledCount++;
         } catch (InterruptedException ie) {
+          LOG.info("Interrupted during checkpointing", ie);
           // Probably requested shutdown.
           continue;
         } catch (Throwable t) {

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/native/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/native:r1512448-1514104

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/datanode:r1509426-1514104

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1512448-1514104

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/secondary:r1512448-1514104

Propchange: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/test/hdfs:r1509426-1514104

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java Thu Aug 15 00:15:11 2013
@@ -405,6 +405,8 @@ public class TestGlobPaths {
 
     status = fs.globStatus(new Path("/x/x"), falseFilter);
     assertNull(status);
+
+    cleanupDFS();
   }
 
   private void checkStatus(FileStatus[] status, Path ... expectedMatches) {
@@ -783,8 +785,7 @@ public class TestGlobPaths {
     return globResults;
   }
 
-  @After
-  public void cleanupDFS() throws IOException {
+  private void cleanupDFS() throws IOException {
     fs.delete(new Path(USER_DIR), true);
   }
 

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java Thu Aug 15 00:15:11 2013
@@ -42,7 +42,8 @@ public class TestSymlinkHdfsDisable {
     DistributedFileSystem dfs = cluster.getFileSystem();
     FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
     // Create test files/links
-    FileContextTestHelper helper = new FileContextTestHelper();
+    FileContextTestHelper helper = new FileContextTestHelper(
+        "/tmp/TestSymlinkHdfsDisable");
     Path root = helper.getTestRootPath(fc);
     Path target = new Path(root, "target");
     Path link = new Path(root, "link");

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Thu Aug 15 00:15:11 2013
@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFal
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
 
 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -81,6 +83,8 @@ import org.apache.log4j.RollingFileAppen
 import org.junit.Test;
 
 import com.google.common.collect.Sets;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.*;
 
 /**
  * A JUnit test for doing fsck
@@ -876,6 +880,59 @@ public class TestFsck {
     }
   }
 
+  /** Test fsck with FileNotFound */
+  @Test
+  public void testFsckFileNotFound() throws Exception {
+
+    // Number of replicas to actually start
+    final short NUM_REPLICAS = 1;
+
+    Configuration conf = new Configuration();
+    NameNode namenode = mock(NameNode.class);
+    NetworkTopology nettop = mock(NetworkTopology.class);
+    Map<String,String[]> pmap = new HashMap<String, String[]>();
+    Writer result = new StringWriter();
+    PrintWriter out = new PrintWriter(result, true);
+    InetAddress remoteAddress = InetAddress.getLocalHost();
+    FSNamesystem fsName = mock(FSNamesystem.class);
+    when(namenode.getNamesystem()).thenReturn(fsName);
+    when(fsName.getBlockLocations(anyString(), anyLong(), anyLong(),
+        anyBoolean(), anyBoolean(), anyBoolean())).
+        thenThrow(new FileNotFoundException()) ;
+
+    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
+        NUM_REPLICAS, (short)1, remoteAddress);
+
+    String pathString = "/tmp/testFile";
+
+    long length = 123L;
+    boolean isDir = false;
+    int blockReplication = 1;
+    long blockSize = 128 *1024L;
+    long modTime = 123123123L;
+    long accessTime = 123123120L;
+    FsPermission perms = FsPermission.getDefault();
+    String owner = "foo";
+    String group = "bar";
+    byte [] symlink = null;
+    byte [] path = new byte[128];
+    path = DFSUtil.string2Bytes(pathString);
+    long fileId = 312321L;
+    int numChildren = 1;
+
+    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
+        blockSize, modTime, accessTime, perms, owner, group, symlink, path,
+        fileId, numChildren);
+    Result res = new Result(conf);
+
+    try {
+      fsck.check(pathString, file, res);
+    } catch (Exception e) {
+      fail("Unexpected exception "+ e.getMessage());
+    }
+    assertTrue(res.toString().contains("HEALTHY"));
+  }
+
   /** Test fsck with symlinks in the filesystem */
   @Test
   public void testFsckSymlink() throws Exception {

Modified: hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java?rev=1514105&r1=1514104&r2=1514105&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java (original)
+++ hadoop/common/branches/HDFS-4949/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java Thu Aug 15 00:15:11 2013
@@ -239,6 +239,34 @@ public class TestStandbyCheckpoints {
 
     assertTrue(canceledOne);
   }
+
+  /**
+   * Test cancellation of ongoing checkpoints when failover happens
+   * mid-checkpoint during image upload from standby to active NN.
+   */
+  @Test(timeout=60000)
+  public void testCheckpointCancellationDuringUpload() throws Exception {
+    // don't compress, we want a big image
+    cluster.getConfiguration(0).setBoolean(
+        DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
+    cluster.getConfiguration(1).setBoolean(
+        DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
+    // Throttle SBN upload to make it hang during upload to ANN
+    cluster.getConfiguration(1).setLong(
+        DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, 100);
+    cluster.restartNameNode(0);
+    cluster.restartNameNode(1);
+    nn0 = cluster.getNameNode(0);
+    nn1 = cluster.getNameNode(1);
+
+    cluster.transitionToActive(0);
+
+    doEdits(0, 100);
+    HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
+    HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(104));
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+  }
 
   /**
    * Make sure that clients will receive StandbyExceptions even when a