Author: cdouglas
Date: Thu Dec 11 17:02:39 2008
New Revision: 725873
URL: http://svn.apache.org/viewvc?rev=725873&view=rev
Log:
HADOOP-4728. Add a test exercising different namenode configurations.
Contributed by Boris Shkolnik.
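For context, the new TestStartup test (added below) exercises non-default namenode layouts: it points dfs.name.dir, dfs.name.edits.dir and the checkpoint directories at its own locations, creates a checkpoint, corrupts the name directories, and then restarts the namenode with the -importCheckpoint option. As a rough illustration only, this is the kind of MiniDFSCluster call the test relies on; the argument values are taken from TestStartup, but the parameter names in the comments are guesses, not part of this change:

    // Hypothetical parameter names for readability; only the argument values
    // appear in TestStartup. Under this reading, the cluster is asked not to
    // format and not to manage the name/data directories itself, and to start
    // the namenode with -importCheckpoint.
    // MiniDFSCluster(nameNodePort, conf, numDataNodes, format,
    //                manageNameDfsDirs, manageDataDfsDirs,
    //                operation, racks, hosts, simulatedCapacities)
    MiniDFSCluster cluster =
        new MiniDFSCluster(0, config, 1, false, false, false,
                           StartupOption.IMPORT, null, null, null);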
Added:
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
Modified:
hadoop/core/trunk/CHANGES.txt
hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=725873&r1=725872&r2=725873&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Thu Dec 11 17:02:39 2008
@@ -221,6 +221,9 @@
HADOOP-4737. Adds the KILLED notification when jobs get killed.
(Amareshwari Sriramadasu via ddas)
+ HADOOP-4728. Add a test exercising different namenode configurations.
+ (Boris Shkolnik via cdouglas)
+
OPTIMIZATIONS
HADOOP-3293. Fixes FileInputFormat to provide locations for splits
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=725873&r1=725872&r2=725873&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java Thu Dec 11 17:02:39 2008
@@ -269,7 +269,7 @@
String[] args = (operation == null ||
operation == StartupOption.FORMAT ||
operation == StartupOption.REGULAR) ?
- new String[] {} : new String[] {"-"+operation.toString()};
+ new String[] {} : new String[] {operation.getName()};
conf.setClass("topology.node.switch.mapping.impl",
StaticMapping.class, DNSToSwitchMapping.class);
nameNode = NameNode.createNameNode(args, conf);
@@ -369,11 +369,11 @@
conf.set("dfs.datanode.http.address", "127.0.0.1:0");
conf.set("dfs.datanode.ipc.address", "127.0.0.1:0");
- String[] args = (operation == null ||
- operation == StartupOption.FORMAT ||
- operation == StartupOption.REGULAR) ?
- null : new String[] {"-"+operation.toString()};
- String [] dnArgs = (operation == StartupOption.UPGRADE) ? null : args;
+
+ String [] dnArgs = (operation == null ||
+ operation != StartupOption.ROLLBACK) ?
+ null : new String[] {operation.getName()};
+
for (int i = curDatanodesNum; i < curDatanodesNum+numDataNodes; i++) {
Configuration dnConf = new Configuration(conf);
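Both hunks above change how MiniDFSCluster turns a StartupOption into command-line arguments: the namenode now receives operation.getName() instead of "-" + operation.toString(), and datanodes only receive a startup argument when the operation is ROLLBACK. This reads naturally if getName() returns the full command-line flag, leading dash included. A minimal sketch of the enum shape that assumes (the constants appear in this patch, but the exact flag strings and field layout are assumptions, not shown in this commit):

    // Assumed shape of HdfsConstants.StartupOption: each constant carries its
    // CLI flag, so getName() already includes the leading dash (e.g. "-rollback").
    // toString() would instead yield the constant name ("ROLLBACK"), which is
    // presumably why the hand-built "-" + toString() form was dropped.
    public enum StartupOption {
      FORMAT("-format"), REGULAR("-regular"), UPGRADE("-upgrade"),
      ROLLBACK("-rollback"), IMPORT("-importCheckpoint");

      private final String name;
      StartupOption(String name) { this.name = name; }
      public String getName() { return name; }
    }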
Added: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java?rev=725873&view=auto
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java (added)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestStartup.java Thu Dec 11 17:02:39 2008
@@ -0,0 +1,305 @@
+package org.apache.hadoop.hdfs.server.namenode;
+
+import java.io.File;
+import java.io.IOException;
+import java.util.Iterator;
+import java.util.List;
+import java.util.Random;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeDirType;
+import org.apache.hadoop.hdfs.server.namenode.FSImage.NameNodeFile;
+import org.apache.hadoop.util.StringUtils;
+
+/**
+ * Startup and checkpoint tests
+ *
+ */
+public class TestStartup extends TestCase {
+ public static final String NAME_NODE_HOST = "localhost:";
+ public static final String NAME_NODE_HTTP_HOST = "0.0.0.0:";
+ private static final Log LOG =
+ LogFactory.getLog(TestStartup.class.getName());
+ private Configuration config;
+ private File hdfsDir=null;
+ static final long seed = 0xAAAAEEFL;
+ static final int blockSize = 4096;
+ static final int fileSize = 8192;
+ private long editsLength=0, fsimageLength=0;
+
+
+ private void writeFile(FileSystem fileSys, Path name, int repl)
+ throws IOException {
+ FSDataOutputStream stm = fileSys.create(name, true,
+ fileSys.getConf().getInt("io.file.buffer.size", 4096),
+ (short)repl, (long)blockSize);
+ byte[] buffer = new byte[fileSize];
+ Random rand = new Random(seed);
+ rand.nextBytes(buffer);
+ stm.write(buffer);
+ stm.close();
+ }
+
+
+ protected void setUp() throws Exception {
+ config = new Configuration();
+ String baseDir = System.getProperty("test.build.data", "/tmp");
+
+ hdfsDir = new File(baseDir, "dfs");
+ if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+ throw new IOException("Could not delete hdfs directory '" + hdfsDir +
"'");
+ }
+ LOG.info("--hdfsdir is " + hdfsDir.getAbsolutePath());
+ config.set("dfs.name.dir", new File(hdfsDir, "name").getPath());
+ config.set("dfs.data.dir", new File(hdfsDir, "data").getPath());
+
+ config.set("fs.checkpoint.dir",new File(hdfsDir, "secondary").getPath());
+ //config.set("fs.default.name", "hdfs://"+ NAME_NODE_HOST + "0");
+
+ FileSystem.setDefaultUri(config, "hdfs://"+NAME_NODE_HOST + "0");
+ }
+
+ /**
+ * clean up
+ */
+ public void tearDown() throws Exception {
+ if ( hdfsDir.exists() && !FileUtil.fullyDelete(hdfsDir) ) {
+ throw new IOException("Could not delete hdfs directory in tearDown '" +
hdfsDir + "'");
+ }
+ }
+
+ /**
+ * start MiniDFScluster, create a file (to create edits) and do a checkpoint
+ * @throws IOException
+ */
+ public void createCheckPoint() throws IOException {
+ LOG.info("--starting mini cluster");
+ // manage dirs parameter set to false
+ MiniDFSCluster cluster = null;
+ SecondaryNameNode sn = null;
+
+ try {
+ cluster = new MiniDFSCluster(0, config, 1, true, false, false, null, null, null, null);
+ cluster.waitActive();
+
+ LOG.info("--starting Secondary Node");
+
+ // start secondary node
+ sn = new SecondaryNameNode(config);
+ assertNotNull(sn);
+
+ // create a file
+ FileSystem fileSys = cluster.getFileSystem();
+ Path file1 = new Path("t1");
+ this.writeFile(fileSys, file1, 1);
+
+ LOG.info("--doing checkpoint");
+ sn.doCheckpoint(); // this shouldn't fail
+ LOG.info("--done checkpoint");
+ } catch (IOException e) {
+ fail(StringUtils.stringifyException(e));
+ System.err.println("checkpoint failed");
+ throw e;
+ } finally {
+ if(sn!=null)
+ sn.shutdown();
+ if(cluster!=null)
+ cluster.shutdown();
+ LOG.info("--file t1 created, cluster shutdown");
+ }
+ }
+
+ /*
+ * corrupt files by removing and recreating the directory
+ */
+ private void corruptNameNodeFiles() throws IOException {
+ // now corrupt/delete the directory
+ List<File> nameDirs = (List<File>)FSNamesystem.getNamespaceDirs(config);
+ List<File> nameEditsDirs =
(List<File>)FSNamesystem.getNamespaceEditsDirs(config);
+
+ // get name dir and its length, then delete and recreate the directory
+ File dir = nameDirs.get(0); // has only one
+ this.fsimageLength = new File(new File(dir, "current"),
+ NameNodeFile.IMAGE.getName()).length();
+
+ if(dir.exists() && !(FileUtil.fullyDelete(dir)))
+ throw new IOException("Cannot remove directory: " + dir);
+
+ LOG.info("--removed dir "+dir + ";len was ="+ this.fsimageLength);
+
+ if (!dir.mkdirs())
+ throw new IOException("Cannot create directory " + dir);
+
+ dir = nameEditsDirs.get(0); //has only one
+
+ this.editsLength = new File(new File(dir, "current"),
+ NameNodeFile.EDITS.getName()).length();
+
+ if(dir.exists() && !(FileUtil.fullyDelete(dir)))
+ throw new IOException("Cannot remove directory: " + dir);
+ if (!dir.mkdirs())
+ throw new IOException("Cannot create directory " + dir);
+
+ LOG.info("--removed dir and recreated "+dir + ";len was ="+
this.editsLength);
+
+
+ }
+
+ /**
+ * start with -importCheckpoint option and verify that the files are in separate directories and of the right length
+ * @throws IOException
+ */
+ private void checkNameNodeFiles() throws IOException{
+
+ // start namenode with import option
+ LOG.info("-- about to start DFS cluster");
+ MiniDFSCluster cluster = null;
+ try {
+ cluster = new MiniDFSCluster(0, config, 1, false, false, false, StartupOption.IMPORT, null, null, null);
+ cluster.waitActive();
+ LOG.info("--NN started with checkpoint option");
+ NameNode nn = cluster.getNameNode();
+ assertNotNull(nn);
+ // Verify that image file sizes did not change.
+ FSImage image = nn.getFSImage();
+ verifyDifferentDirs(image, this.fsimageLength, this.editsLength);
+ } finally {
+ if(cluster != null)
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * verify that edits log and fsimage are in different directories and of a correct size
+ */
+ private void verifyDifferentDirs(FSImage img, long expectedImgSize, long expectedEditsSize) {
+ StorageDirectory sd =null;
+ for (Iterator<StorageDirectory> it = img.dirIterator(); it.hasNext();) {
+ sd = it.next();
+
+ if(sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
+ File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
+ LOG.info("--image file " + imf.getAbsolutePath() + "; len = " +
imf.length() + "; expected = " + expectedImgSize);
+ assertEquals(expectedImgSize, imf.length());
+ } else if(sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
+ File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
+ LOG.info("-- edits file " + edf.getAbsolutePath() + "; len = " +
edf.length() + "; expected = " + expectedEditsSize);
+ assertEquals(expectedEditsSize, edf.length());
+ } else {
+ fail("Image/Edits directories are not different");
+ }
+ }
+
+ }
+ /**
+ * secnn-6
+ * checkpoint for edits and image is in the same directory
+ * @throws IOException
+ */
+ public void testChkpointStartup2() throws IOException{
+ LOG.info("--starting checkpointStartup2 - same directory for checkpoint");
+ // different name dirs
+ config.set("dfs.name.dir", new File(hdfsDir, "name").getPath());
+ config.set("dfs.name.edits.dir", new File(hdfsDir, "edits").getPath());
+ // same checkpoint dirs
+ config.set("fs.checkpoint.edits.dir", new File(hdfsDir,
"chkpt").getPath());
+ config.set("fs.checkpoint.dir", new File(hdfsDir, "chkpt").getPath());
+
+ createCheckPoint();
+
+ corruptNameNodeFiles();
+ checkNameNodeFiles();
+
+ }
+
+ /**
+ * secnn-8
+ * checkpoint for edits and image are different directories
+ * @throws IOException
+ */
+ public void testChkpointStartup1() throws IOException{
+ //setUpConfig();
+ LOG.info("--starting testStartup Recovery");
+ // different name dirs
+ config.set("dfs.name.dir", new File(hdfsDir, "name").getPath());
+ config.set("dfs.name.edits.dir", new File(hdfsDir, "edits").getPath());
+ // different checkpoint dirs
+ config.set("fs.checkpoint.edits.dir", new File(hdfsDir,
"chkpt_edits").getPath());
+ config.set("fs.checkpoint.dir", new File(hdfsDir, "chkpt").getPath());
+
+ createCheckPoint();
+ corruptNameNodeFiles();
+ checkNameNodeFiles();
+ }
+
+ /**
+ * secnn-7
+ * secondary node copies fsimage and edits into correct separate directories.
+ * @throws IOException
+ */
+ public void testSNNStartup() throws IOException{
+ //setUpConfig();
+ LOG.info("--starting SecondNN startup test");
+ // same dir for name (image) and edits
+ config.set("dfs.name.dir", new File(hdfsDir, "name").getPath());
+ config.set("dfs.name.edits.dir", new File(hdfsDir, "name").getPath());
+ // different checkpoint dirs
+ config.set("fs.checkpoint.edits.dir", new File(hdfsDir,
"chkpt_edits").getPath());
+ config.set("fs.checkpoint.dir", new File(hdfsDir, "chkpt").getPath());
+
+ LOG.info("--starting NN ");
+ MiniDFSCluster cluster = null;
+ SecondaryNameNode sn = null;
+ NameNode nn = null;
+ try {
+ cluster = new MiniDFSCluster(0, config, 1, true, false, false, null, null, null, null);
+ cluster.waitActive();
+ nn = cluster.getNameNode();
+ assertNotNull(nn);
+
+ // start secondary node
+ LOG.info("--starting SecondNN");
+ sn = new SecondaryNameNode(config);
+ assertNotNull(sn);
+
+ LOG.info("--doing checkpoint");
+ sn.doCheckpoint(); // this shouldn't fail
+ LOG.info("--done checkpoint");
+
+
+
+ // now verify that image and edits are created in the different directories
+ FSImage image = nn.getFSImage();
+ StorageDirectory sd = image.getStorageDir(0); //only one
+ assertEquals(sd.getStorageDirType(), NameNodeDirType.IMAGE_AND_EDITS);
+ File imf = FSImage.getImageFile(sd, NameNodeFile.IMAGE);
+ File edf = FSImage.getImageFile(sd, NameNodeFile.EDITS);
+ LOG.info("--image file " + imf.getAbsolutePath() + "; len = " +
imf.length());
+ LOG.info("--edits file " + edf.getAbsolutePath() + "; len = " +
edf.length());
+
+ FSImage chkpImage = sn.getFSImage();
+ verifyDifferentDirs(chkpImage, imf.length(), edf.length());
+
+ } catch (IOException e) {
+ fail(StringUtils.stringifyException(e));
+ System.err.println("checkpoint failed");
+ throw e;
+ } finally {
+ if(sn!=null)
+ sn.shutdown();
+ if(cluster!=null)
+ cluster.shutdown();
+ }
+ }
+}