Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend.java Sat Feb  4 03:40:45 2012
@@ -17,29 +17,27 @@
  */
 package org.apache.hadoop.hdfs;
 
-import static org.junit.Assert.*;
+import static org.junit.Assert.assertTrue;
 
 import java.io.File;
-import java.io.IOException;
 import java.io.FileNotFoundException;
+import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.List;
 
-import org.junit.Test;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.HardLink;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
+import org.junit.Test;
 
 /**
  * This class tests the building blocks that are needed to
@@ -133,14 +131,13 @@ public class TestFileAppend{
       LocatedBlocks locations = client.getNamenode().getBlockLocations(
           file1.toString(), 0, Long.MAX_VALUE);
       List<LocatedBlock> blocks = locations.getLocatedBlocks();
-      FSDataset dataset = (FSDataset) dn[0].data;
 
       //
       // Create hard links for a few of the blocks
       //
       for (int i = 0; i < blocks.size(); i = i + 2) {
         ExtendedBlock b = blocks.get(i).getBlock();
-        final File f = DataNodeTestUtils.getBlockFile(dataset,
+        final File f = DataNodeTestUtils.getFile(dn[0],
             b.getBlockPoolId(), b.getLocalBlock().getBlockId());
         File link = new File(f.toString() + ".link");
         System.out.println("Creating hardlink for File " + f + " to " + link);
@@ -154,7 +151,7 @@ public class TestFileAppend{
         ExtendedBlock b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned true",
-            dataset.unlinkBlock(b, 1));
+            DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
       }
 
       // Since the blocks were already detached earlier, these calls should
@@ -164,7 +161,7 @@ public class TestFileAppend{
         ExtendedBlock b = blocks.get(i).getBlock();
         System.out.println("testCopyOnWrite detaching block " + b);
         assertTrue("Detaching block " + b + " should have returned false",
-            !dataset.unlinkBlock(b, 1));
+            !DataNodeTestUtils.unlinkBlock(dn[0], b, 1));
       }
 
     } finally {
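The diff above swaps the direct (FSDataset) cast for the new DataNodeTestUtils helpers. A minimal sketch of that usage pattern, assuming a running MiniDFSCluster and a LocatedBlock already fetched via getBlockLocations(); the helper signatures come from this commit, but the class and variable names below are illustrative only:

    import java.io.File;

    import org.apache.hadoop.hdfs.MiniDFSCluster;
    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

    class BlockDetachSketch {
      // Resolve a replica's on-disk file and break its hard links through
      // DataNodeTestUtils instead of casting dn.data to the concrete FSDataset.
      static boolean detach(MiniDFSCluster cluster, LocatedBlock lb)
          throws Exception {
        DataNode dn = cluster.getDataNodes().get(0);
        ExtendedBlock b = lb.getBlock();
        // Look up the block file by pool id and block id.
        File f = DataNodeTestUtils.getFile(
            dn, b.getBlockPoolId(), b.getLocalBlock().getBlockId());
        System.out.println("replica file: " + f);
        // Returns false if the replica was already unlinked.
        return DataNodeTestUtils.unlinkBlock(dn, b, 1);
      }
    }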
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileAppend3.java Sat Feb  4 03:40:45 2012
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import java.io.File;
 import java.io.IOException;
 import java.io.RandomAccessFile;
 
@@ -35,7 +36,7 @@ import org.apache.hadoop.hdfs.protocol.E
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -199,8 +200,9 @@ public class TestFileAppend3 extends jun
     DatanodeInfo[] datanodeinfos = lb.getLocations();
     assertEquals(repl, datanodeinfos.length);
     final DataNode dn = cluster.getDataNode(datanodeinfos[0].getIpcPort());
-    final FSDataset data = (FSDataset)dn.getFSDataset();
-    final RandomAccessFile raf = new RandomAccessFile(data.getBlockFile(blk), "rw");
+    final File f = DataNodeTestUtils.getBlockFile(
+        dn, blk.getBlockPoolId(), blk.getLocalBlock());
+    final RandomAccessFile raf = new RandomAccessFile(f, "rw");
     AppendTestUtil.LOG.info("dn=" + dn + ", blk=" + blk + " (length=" + blk.getNumBytes() + ")");
     assertEquals(len1, raf.length());
     raf.setLength(0);
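TestFileAppend3 uses the Block-keyed variant, getBlockFile, to reach the replica file it then truncates. A rough sketch of that recipe under the same assumptions (the helper signature is from this commit; everything else is illustrative):

    import java.io.File;
    import java.io.RandomAccessFile;

    import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
    import org.apache.hadoop.hdfs.server.datanode.DataNode;
    import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;

    class TruncateReplicaSketch {
      // Simulate on-disk data loss for one replica by truncating its block
      // file, located via the new Block-keyed helper.
      static void truncate(DataNode dn, ExtendedBlock blk) throws Exception {
        File f = DataNodeTestUtils.getBlockFile(
            dn, blk.getBlockPoolId(), blk.getLocalBlock());
        RandomAccessFile raf = new RandomAccessFile(f, "rw");
        try {
          raf.setLength(0);
        } finally {
          raf.close();
        }
      }
    }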
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java Sat Feb  4 03:40:45 2012
@@ -17,6 +17,21 @@
  */
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_SYNCONCLOSE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+
 import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileNotFoundException;
@@ -36,7 +51,6 @@ import org.apache.hadoop.fs.FsServerDefa
 import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -45,7 +59,6 @@ import org.apache.hadoop.hdfs.protocol.L
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.FSDataset;
 import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.LeaseManager;
@@ -829,10 +842,9 @@ public class TestFileCreation extends ju
     int successcount = 0;
     for(DatanodeInfo datanodeinfo: locatedblock.getLocations()) {
       DataNode datanode = cluster.getDataNode(datanodeinfo.ipcPort);
-      FSDataset dataset = (FSDataset)datanode.data;
       ExtendedBlock blk = locatedblock.getBlock();
-      Block b = dataset.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
-      final File blockfile = DataNodeTestUtils.getBlockFile(dataset,
+      Block b = datanode.data.getStoredBlock(blk.getBlockPoolId(), blk.getBlockId());
+      final File blockfile = DataNodeTestUtils.getFile(datanode,
           blk.getBlockPoolId(), b.getBlockId());
       System.out.println("blockfile=" + blockfile);
       if (blockfile != null) {

Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHftpFileSystem.java Sat Feb  4 03:40:45 2012
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 import java.net.URISyntaxException;
+import java.net.URI;
 import java.net.URL;
 import java.net.HttpURLConnection;
 import java.util.Random;
@@ -232,4 +233,164 @@ public class TestHftpFileSystem {
     in.seek(7);
     assertEquals('7', in.read());
   }
+
+  public void resetFileSystem() throws IOException {
+    // filesystem caching has a quirk/bug that it caches based on the user's
+    // given uri.  the result is if a filesystem is instantiated with no port,
+    // it gets the default port.  then if the default port is changed,
+    // and another filesystem is instantiated with no port, the prior fs
+    // is returned, not a new one using the changed port.  so let's flush
+    // the cache between tests...
+    FileSystem.closeAll();
+  }
+
+  @Test
+  public void testHftpDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hftp://localhost");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hftp://localhost");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(123, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:456",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomUriPortWithDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hftp://localhost:123");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHftpCustomUriPortWithCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hftp://localhost:789");
+    HftpFileSystem fs = (HftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(123, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:456",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  ///
+
+  @Test
+  public void testHsftpDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hsftp://localhost");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:"+DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT,
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hsftp://localhost");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(456, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:456",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomUriPortWithDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    URI uri = URI.create("hsftp://localhost:123");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort());
+    assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:123",
+        fs.getCanonicalServiceName()
+    );
+  }
+
+  @Test
+  public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException {
+    resetFileSystem();
+    Configuration conf = new Configuration();
+    conf.setInt("dfs.http.port", 123);
+    conf.setInt("dfs.https.port", 456);
+
+    URI uri = URI.create("hsftp://localhost:789");
+    HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf);
+
+    assertEquals(456, fs.getDefaultPort());
+    assertEquals(456, fs.getDefaultSecurePort());
+
+    assertEquals(uri, fs.getUri());
+    assertEquals(
+        "127.0.0.1:789",
+        fs.getCanonicalServiceName()
+    );
+  }
 }
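Taken together, the assertions above encode that hftp always derives its canonical service name from the HTTPS port (the default, or a dfs.https.port override), even when the URI carries an explicit port, while hsftp prefers an explicit URI port and falls back to the secure default otherwise. The resetFileSystem() comment also explains why each test flushes the FileSystem cache first; a small sketch of that quirk, assuming the 0.23-era cache, which keys on the caller's URI (scheme and authority), not on the configured ports:

    import java.io.IOException;
    import java.net.URI;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    class FsCacheQuirkSketch {
      static void demo() throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs1 = FileSystem.get(URI.create("hftp://localhost"), conf);

        conf.setInt("dfs.http.port", 123); // change the default afterwards...
        FileSystem fs2 = FileSystem.get(URI.create("hftp://localhost"), conf);

        // ...and the cached instance comes back, still on the old port,
        // because the cache key is built from the caller's URI alone.
        assert fs1 == fs2;

        FileSystem.closeAll(); // flush, so the next get() sees the new port
      }
    }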
"127.0.0.1:456", + fs.getCanonicalServiceName() + ); + } + + @Test + public void testHsftpCustomUriPortWithDefaultPorts() throws IOException { + resetFileSystem(); + Configuration conf = new Configuration(); + URI uri = URI.create("hsftp://localhost:123"); + HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf); + + assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultPort()); + assertEquals(DFSConfigKeys.DFS_NAMENODE_HTTPS_PORT_DEFAULT, fs.getDefaultSecurePort()); + + assertEquals(uri, fs.getUri()); + assertEquals( + "127.0.0.1:123", + fs.getCanonicalServiceName() + ); + } + + @Test + public void testHsftpCustomUriPortWithCustomDefaultPorts() throws IOException { + resetFileSystem(); + Configuration conf = new Configuration(); + conf.setInt("dfs.http.port", 123); + conf.setInt("dfs.https.port", 456); + + URI uri = URI.create("hsftp://localhost:789"); + HsftpFileSystem fs = (HsftpFileSystem) FileSystem.get(uri, conf); + + assertEquals(456, fs.getDefaultPort()); + assertEquals(456, fs.getDefaultSecurePort()); + + assertEquals(uri, fs.getUri()); + assertEquals( + "127.0.0.1:789", + fs.getCanonicalServiceName() + ); + } } Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java?rev=1240450&r1=1240449&r2=1240450&view=diff ============================================================================== --- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java (original) +++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/DataNodeTestUtils.java Sat Feb 4 03:40:45 2012 @@ -22,6 +22,8 @@ package org.apache.hadoop.hdfs.server.da import java.io.File; import java.io.IOException; +import org.apache.hadoop.hdfs.protocol.Block; +import org.apache.hadoop.hdfs.protocol.ExtendedBlock; import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration; /** @@ -39,7 +41,18 @@ public class DataNodeTestUtils { return dn.getDNRegistrationForBP(bpid); } - public static File getBlockFile(FSDataset fsdataset, String bpid, long bid) { - return fsdataset.getFile(bpid, bid); + public static File getFile(DataNode dn, String bpid, long bid) { + return ((FSDataset)dn.getFSDataset()).getFile(bpid, bid); + } + + public static File getBlockFile(DataNode dn, String bpid, Block b + ) throws IOException { + return ((FSDataset)dn.getFSDataset()).getBlockFile(bpid, b); + } + + public static boolean unlinkBlock(DataNode dn, ExtendedBlock block, int numLinks + ) throws IOException { + ReplicaInfo info = ((FSDataset)dn.getFSDataset()).getReplicaInfo(block); + return info.unlinkBlock(numLinks); } } Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1240450&r1=1240449&r2=1240450&view=diff ============================================================================== --- 
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestEditLog.java Sat Feb  4 03:40:45 2012
@@ -116,10 +116,12 @@ public class TestEditLog extends TestCas
     int numTransactions;
     short replication = 3;
     long blockSize = 64;
+    int startIndex;
 
-    Transactions(FSNamesystem ns, int num) {
+    Transactions(FSNamesystem ns, int numTx, int startIdx) {
       namesystem = ns;
-      numTransactions = num;
+      numTransactions = numTx;
+      startIndex = startIdx;
     }
 
     // add a bunch of transactions.
@@ -131,8 +133,8 @@ public class TestEditLog extends TestCas
       for (int i = 0; i < numTransactions; i++) {
         INodeFileUnderConstruction inode = new INodeFileUnderConstruction(
             p, replication, blockSize, 0, "", "", null);
-        editLog.logOpenFile("/filename" + i, inode);
-        editLog.logCloseFile("/filename" + i, inode);
+        editLog.logOpenFile("/filename" + startIndex + i, inode);
+        editLog.logCloseFile("/filename" + startIndex + i, inode);
         editLog.logSync();
       }
     }
@@ -280,7 +282,8 @@ public class TestEditLog extends TestCas
     // Create threads and make them run transactions concurrently.
     Thread threadId[] = new Thread[NUM_THREADS];
     for (int i = 0; i < NUM_THREADS; i++) {
-      Transactions trans = new Transactions(namesystem, NUM_TRANSACTIONS);
+      Transactions trans =
+        new Transactions(namesystem, NUM_TRANSACTIONS, i*NUM_TRANSACTIONS);
       threadId[i] = new Thread(trans, "TransactionThread-" + i);
       threadId[i].start();
     }
@@ -293,11 +296,16 @@ public class TestEditLog extends TestCas
         i--; // retry
       }
     }
-
+
+    // Reopen some files as for append
+    Transactions trans =
+      new Transactions(namesystem, NUM_TRANSACTIONS, NUM_TRANSACTIONS / 2);
+    trans.run();
+
     // Roll another time to finalize edits_inprogress_3
     fsimage.rollEditLog();
 
-    long expectedTxns = (NUM_THREADS * 2 * NUM_TRANSACTIONS) + 2; // +2 for start/end txns
+    long expectedTxns = ((NUM_THREADS+1) * 2 * NUM_TRANSACTIONS) + 2; // +2 for start/end txns
 
     // Verify that we can read in all the transactions that we have written.
     // If there were any corruptions, it is likely that the reading in
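The arithmetic behind the new expectedTxns value: each Transactions.run() logs one open and one close edit per iteration, the NUM_THREADS concurrent runs plus the extra serial "reopen" run contribute the (NUM_THREADS+1) factor, and rolling the log adds the start/end-of-segment records. A worked check (the constant values below are illustrative; the real ones live in TestEditLog):

    class ExpectedTxnsSketch {
      public static void main(String[] args) {
        final int NUM_THREADS = 100;      // illustrative
        final int NUM_TRANSACTIONS = 100; // illustrative

        long fromThreads = NUM_THREADS * 2L * NUM_TRANSACTIONS; // open + close each
        long fromReopens = 2L * NUM_TRANSACTIONS;               // extra serial run
        long segmentTxns = 2;                                   // start/end of segment

        long expected = fromThreads + fromReopens + segmentTxns;
        // Same as ((NUM_THREADS+1) * 2 * NUM_TRANSACTIONS) + 2 in the test.
        System.out.println("expectedTxns = " + expected);
      }
    }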
Modified: hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java?rev=1240450&r1=1240449&r2=1240450&view=diff
==============================================================================
--- hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java (original)
+++ hadoop/common/branches/branch-0.23-PB/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java Sat Feb  4 03:40:45 2012
@@ -25,6 +25,7 @@ import javax.management.ObjectName;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.util.VersionInfo;
 import org.junit.Test;
 
 import junit.framework.Assert;
@@ -57,6 +58,8 @@ public class TestNameNodeMXBean {
     // get attribute "Version"
     String version = (String) mbs.getAttribute(mxbeanName, "Version");
     Assert.assertEquals(fsn.getVersion(), version);
+    Assert.assertTrue(version.equals(VersionInfo.getVersion()
+        + ", r" + VersionInfo.getRevision()));
     // get attribute "Used"
     Long used = (Long) mbs.getAttribute(mxbeanName, "Used");
     Assert.assertEquals(fsn.getUsed(), used.longValue());
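The strengthened assertion pins the MXBean's Version attribute to the exact string built from VersionInfo. A sketch of the expected shape (the concrete values vary per build; something like "0.23.1-SNAPSHOT, r1240450" is illustrative only):

    import org.apache.hadoop.util.VersionInfo;

    class VersionStringSketch {
      // The "Version" attribute is expected to read "<version>, r<revision>".
      static String expectedVersion() {
        return VersionInfo.getVersion() + ", r" + VersionInfo.getRevision();
      }
    }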
