Author: arp
Date: Tue Oct 22 17:24:52 2013
New Revision: 1534707

URL: http://svn.apache.org/r1534707
Log:
Merging r1534279 through r1534706 from trunk to branch HDFS-2832
Added:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java
      - copied unchanged from r1534706, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatus.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java
      - copied unchanged from r1534706, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementStatusDefault.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer-block-info.dust.html
      - copied unchanged from r1534706, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer-block-info.dust.html
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.dust.html
      - copied unchanged from r1534706, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.dust.html
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
      - copied unchanged from r1534706, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.html
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
      - copied unchanged from r1534706, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/explorer.js
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm
      - copied unchanged from r1534706, hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/site/apt/HdfsNfsGateway.apt.vm

Modified:
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/   (props changed)
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
    hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs:r1534279-1534706

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt?rev=1534707&r1=1534706&r2=1534707&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt Tue Oct 22 17:24:52 2013
@@ -261,6 +261,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5379. Update links to datanode information in dfshealth.html.
     (Haohui Mai via jing9)
 
+    HDFS-5382. Implement the UI of browsing filesystems in HTML 5 page. (Haohui
+    Mai via jing9)
+
   IMPROVEMENTS
 
     HDFS-5267. Remove volatile from LightWeightHashSet. (Junping Du via llu)
@@ -329,6 +332,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-4511. Cover package org.apache.hadoop.hdfs.tools with unit test
     (Andrey Klochkov via jeagles)
 
+    HDFS-4885. Improve the verifyBlockPlacement() API in BlockPlacementPolicy.
+    (Junping Du via szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-5239. Allow FSNamesystem lock fairness to be configurable (daryn)
@@ -369,6 +375,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5336. DataNode should not output 'StartupProgress' metrics.
     (Akira Ajisaka via cnauroth)
 
+    HDFS-5400. DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT constant is set
+    to the wrong value. (Colin Patrick McCabe)
+
 Release 2.2.1 - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -419,6 +428,8 @@ Release 2.2.1 - UNRELEASED
     and range in error message. (Kousuke Saruta via suresh)
 
     HDFS-5365. Fix libhdfs compile error on FreeBSD9. (Radim Kolar via cnauroth)
+
+    HDFS-5347. Add HDFS NFS user guide. (brandonli)
 
 Release 2.2.0 - 2013-10-13
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml?rev=1534707&r1=1534706&r2=1534707&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/pom.xml Tue Oct 22 17:24:52 2013
@@ -550,6 +550,8 @@ http://maven.apache.org/xsd/maven-4.0.0.
             <exclude>src/main/webapps/static/dust-full-2.0.0.min.js</exclude>
             <exclude>src/main/webapps/static/dust-helpers-1.1.1.min.js</exclude>
             <exclude>src/main/webapps/hdfs/dfshealth.dust.html</exclude>
+            <exclude>src/main/webapps/hdfs/explorer-block-info.dust.html</exclude>
+            <exclude>src/main/webapps/hdfs/explorer.dust.html</exclude>
           </excludes>
         </configuration>
       </plugin>

Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/
------------------------------------------------------------------------------
  Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/java:r1534279-1534706

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java?rev=1534707&r1=1534706&r2=1534707&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java Tue Oct 22 17:24:52 2013
@@ -383,7 +383,7 @@ public class DFSConfigKeys extends Commo
   public static final int DFS_CLIENT_MMAP_CACHE_SIZE_DEFAULT = 1024;
   public static final String DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS = "dfs.client.mmap.cache.timeout.ms";
   public static final long DFS_CLIENT_MMAP_CACHE_TIMEOUT_MS_DEFAULT = 15 * 60 * 1000;
-  public static final String DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT = "dfs.client.mmap.cache.timeout.ms";
+  public static final String DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT = "dfs.client.mmap.cache.thread.runs.per.timeout";
   public static final int DFS_CLIENT_MMAP_CACHE_THREAD_RUNS_PER_TIMEOUT_DEFAULT = 4;
 
   // property for fsimage compression
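The DFSConfigKeys change above is the HDFS-5400 fix: the old constant reused the string of dfs.client.mmap.cache.timeout.ms, so a user-supplied timeout value was silently read back as the thread-runs-per-timeout count. Below is a minimal sketch of that collision using only the generic org.apache.hadoop.conf.Configuration API; the property names come from the diff, while the values and the standalone class are illustrative assumptions, not part of this commit.

import org.apache.hadoop.conf.Configuration;

public class MmapCacheKeyCollision {
  // Key strings as they appeared before and after the fix (taken from the diff above).
  static final String TIMEOUT_KEY  = "dfs.client.mmap.cache.timeout.ms";
  static final String RUNS_KEY_OLD = "dfs.client.mmap.cache.timeout.ms";                // buggy: same string as TIMEOUT_KEY
  static final String RUNS_KEY_NEW = "dfs.client.mmap.cache.thread.runs.per.timeout";   // corrected key

  public static void main(String[] args) {
    Configuration conf = new Configuration(false);
    conf.setLong(TIMEOUT_KEY, 900000L);   // hypothetical user setting: a 15-minute timeout

    // With the old constant, asking for "runs per timeout" reads the timeout value
    // instead of falling back to the intended default of 4.
    System.out.println(conf.getInt(RUNS_KEY_OLD, 4));   // prints 900000
    // With the corrected constant the unrelated timeout setting no longer leaks in.
    System.out.println(conf.getInt(RUNS_KEY_NEW, 4));   // prints 4
  }
}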
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java?rev=1534707&r1=1534706&r2=1534707&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java Tue Oct 22 17:24:52 2013
@@ -100,18 +100,17 @@ public abstract class BlockPlacementPoli
   }
 
   /**
-   * Verify that the block is replicated on at least minRacks different racks
-   * if there is more than minRacks rack in the system.
+   * Verify if the block's placement meets requirement of placement policy,
+   * i.e. replicas are placed on no less than minRacks racks in the system.
    *
    * @param srcPath the full pathname of the file to be verified
    * @param lBlk block with locations
-   * @param minRacks number of racks the block should be replicated to
-   * @return the difference between the required and the actual number of racks
-   * the block is replicated to.
+   * @param numOfReplicas replica number of file to be verified
+   * @return the result of verification
    */
-  abstract public int verifyBlockPlacement(String srcPath,
-                                           LocatedBlock lBlk,
-                                           int minRacks);
+  abstract public BlockPlacementStatus verifyBlockPlacement(String srcPath,
+      LocatedBlock lBlk,
+      int numOfReplicas);
 
   /**
    * Decide whether deleting the specified replica of the block still makes
    * the block conform to the configured block placement policy.

Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java?rev=1534707&r1=1534706&r2=1534707&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java Tue Oct 22 17:24:52 2013
@@ -698,22 +698,22 @@ public class BlockPlacementPolicyDefault
   }
 
   @Override
-  public int verifyBlockPlacement(String srcPath,
-                                  LocatedBlock lBlk,
-                                  int minRacks) {
+  public BlockPlacementStatus verifyBlockPlacement(String srcPath,
+      LocatedBlock lBlk, int numberOfReplicas) {
     DatanodeInfo[] locs = lBlk.getLocations();
     if (locs == null)
       locs = DatanodeDescriptor.EMPTY_ARRAY;
     int numRacks = clusterMap.getNumOfRacks();
     if(numRacks <= 1) // only one rack
-      return 0;
-    minRacks = Math.min(minRacks, numRacks);
+      return new BlockPlacementStatusDefault(
+          Math.min(numRacks, numberOfReplicas), numRacks);
+    int minRacks = Math.min(2, numberOfReplicas);
     // 1. Check that all locations are different.
     // 2. Count locations on different racks.
     Set<String> racks = new TreeSet<String>();
     for (DatanodeInfo dn : locs)
       racks.add(dn.getNetworkLocation());
-    return minRacks - racks.size();
+    return new BlockPlacementStatusDefault(racks.size(), minRacks);
   }
 
   @Override
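BlockPlacementStatus.java and BlockPlacementStatusDefault.java are added by this merge as unchanged copies from trunk, so their contents do not appear in this diff. The sketch below reconstructs the contract the callers above rely on, a status object built from the observed and required rack counts; the field names and error-message wording are assumptions based on how BlockPlacementPolicyDefault and NamenodeFsck use the type, not the committed source.

// Sketch only: the real types live in
// org.apache.hadoop.hdfs.server.blockmanagement (copied unchanged from trunk).
interface BlockPlacementStatus {
  boolean isPlacementPolicySatisfied();   // queried by NamenodeFsck below
  String getErrorDescription();           // printed when the placement policy is violated
}

// Sketch of the default implementation, constructed as
// new BlockPlacementStatusDefault(currentRacks, requiredRacks) in the diff above.
class BlockPlacementStatusDefault implements BlockPlacementStatus {
  private final int currentRacks;
  private final int requiredRacks;

  BlockPlacementStatusDefault(int currentRacks, int requiredRacks) {
    this.currentRacks = currentRacks;
    this.requiredRacks = requiredRacks;
  }

  @Override
  public boolean isPlacementPolicySatisfied() {
    return requiredRacks <= currentRacks;
  }

  @Override
  public String getErrorDescription() {
    if (isPlacementPolicySatisfied()) {
      return null;
    }
    // Assumed wording; the committed class may phrase this differently.
    return "Block should be additionally replicated on "
        + (requiredRacks - currentRacks) + " more rack(s).";
  }
}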
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java?rev=1534707&r1=1534706&r2=1534707&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java Tue Oct 22 17:24:52 2013
@@ -413,8 +413,15 @@ public class DatanodeWebHdfsMethods {
       final long n = length.getValue() != null ?
         Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) :
         in.getVisibleLength() - offset.getValue();
-      return Response.ok(new OpenEntity(in, n, dfsclient)).type(
-          MediaType.APPLICATION_OCTET_STREAM).build();
+
+      /**
+       * Allow the Web UI to perform an AJAX request to get the data.
+       */
+      return Response.ok(new OpenEntity(in, n, dfsclient))
+          .type(MediaType.APPLICATION_OCTET_STREAM)
+          .header("Access-Control-Allow-Methods", "GET")
+          .header("Access-Control-Allow-Origin", "*")
+          .build();
     }
     case GETFILECHECKSUM:
     {
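The two header() calls added above are what let the new HTML5 explorer, served from the NameNode web UI, read file data from a DataNode with an in-browser AJAX request. One way to observe the effect over plain HTTP is sketched below; the host names, port, and file path are placeholders, not values taken from this commit.

import java.io.InputStream;
import java.net.HttpURLConnection;
import java.net.URL;

public class WebHdfsCorsCheck {
  public static void main(String[] args) throws Exception {
    // Hypothetical datanode address and file path; WebHDFS OPEN is served by the datanode.
    URL url = new URL("http://datanode.example.com:50075/webhdfs/v1/tmp/sample.txt"
        + "?op=OPEN&namenoderpcaddress=namenode.example.com:8020&offset=0");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");

    // With this change the datanode answers with CORS headers, so a browser page
    // served from the namenode may read the response via XMLHttpRequest.
    System.out.println("HTTP " + conn.getResponseCode());
    System.out.println("Access-Control-Allow-Origin: "
        + conn.getHeaderField("Access-Control-Allow-Origin"));
    System.out.println("Access-Control-Allow-Methods: "
        + conn.getHeaderField("Access-Control-Allow-Methods"));

    try (InputStream in = conn.getInputStream()) {
      // Drain a little of the payload just to show the stream is readable.
      byte[] buf = new byte[128];
      System.out.println("read " + in.read(buf) + " bytes");
    }
  }
}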
" + blkName + " len=" + block.getNumBytes()); if (locs.length == 0) { Propchange: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/ ------------------------------------------------------------------------------ Merged /hadoop/common/trunk/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs:r1533208-1534706 Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html?rev=1534707&r1=1534706&r2=1534707&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html (original) +++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.dust.html Tue Oct 22 17:24:52 2013 @@ -47,7 +47,7 @@ </div> </div> -<a id="browse-dir-first" style="cursor:pointer">Browse the filesystem</a> <a href="/logs/">NameNode Logs</a> +<a href="explorer.html">Browse the filesystem</a> <a href="/logs/">NameNode Logs</a> <hr/> @@ -56,7 +56,7 @@ <div class="panel-body"> <p> - Security is {#nnstat}{#SecurityModeEnabled}on{:else}off{/SecurityModeEnabled}{/nnstat}.</p> + Security is {#nnstat}{#SecurityEnabled}on{:else}off{/SecurityEnabled}{/nnstat}.</p> <p>{#nn}{#Safemode}{.}{:else}Safemode is off.{/Safemode}{/nn}</p> <p> @@ -207,7 +207,7 @@ </thead> {#nn.LiveNodes} <tr> - <td><a class="browse-dir-links" info-http-addr="{infoAddr}" info-https-addr="{infoSecureAddr}">{name}</a> ({xferaddr})</td> + <td>{name} ({xferaddr})</td> <td>{lastContact}</td> <td>{adminState}</td> <td>{capacity|fmt_bytes}</td> Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js?rev=1534707&r1=1534706&r2=1534707&view=diff ============================================================================== --- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js (original) +++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js Tue Oct 22 17:24:52 2013 @@ -19,19 +19,6 @@ "use strict"; var data = {}; - function generate_browse_dn_link(info_http_addr, info_https_addr) { - var is_https = window.location.protocol === 'https:'; - var authority = is_https ? info_https_addr : info_http_addr; - - var nn_info_port = window.location.port; - if (nn_info_port === "") { - nn_info_port = is_https ? 
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js?rev=1534707&r1=1534706&r2=1534707&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/main/webapps/hdfs/dfshealth.js Tue Oct 22 17:24:52 2013
@@ -19,19 +19,6 @@
   "use strict";
 
   var data = {};
-  function generate_browse_dn_link(info_http_addr, info_https_addr) {
-    var is_https = window.location.protocol === 'https:';
-    var authority = is_https ? info_https_addr : info_http_addr;
-
-    var nn_info_port = window.location.port;
-    if (nn_info_port === "") {
-      nn_info_port = is_https ? 443 : 80;
-    }
-
-    var l = '//' + authority + '/browseDirectory.jsp?dir=%2F&namenodeInfoPort=' +
-      nn_info_port + '&nnaddr=' + data.nnstat.HostAndPort;
-    return l;
-  }
 
   function render() {
     var helpers = {
@@ -56,24 +43,7 @@
 
     load_templates(dust, TEMPLATES, function() {
       dust.render('dfshealth', base.push(data), function(err, out) {
-
-        $('#panel').append(out);
-
-        $('#browse-dir-first').click(function () {
-          var len = data.nn.LiveNodes.length;
-          if (len < 1) {
-            show_err_msg('Cannot browse the DFS since there are no live nodes available.');
-            return false;
-          }
-
-          var dn = data.nn.LiveNodes[Math.floor(Math.random() * len)];
-          window.location.href = generate_browse_dn_link(dn.infoAddr, dn.infoSecureAddr);
-        });
-
-        $('.browse-dir-links').click(function () {
-          var http_addr = $(this).attr('info-http-addr'), https_addr = $(this).attr('info-https-addr');
-          window.location.href = generate_browse_dn_link(http_addr, https_addr);
-        });
+        $('#panel').html(out);
       });
     }, function () {
       show_err_msg('Failed to load the page.');
Modified: hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=1534707&r1=1534706&r2=1534707&view=diff
==============================================================================
--- hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/common/branches/HDFS-2832/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Tue Oct 22 17:24:52 2013
@@ -83,7 +83,6 @@ import org.apache.log4j.RollingFileAppen
 import org.junit.Test;
 
 import com.google.common.collect.Sets;
-import org.mockito.Mockito;
 import static org.mockito.Mockito.*;
 
 /**
@@ -892,6 +891,80 @@ public class TestFsck {
       }
     }
   }
+
+  /**
+   * Tests that the # of misreplaced replicas is correct
+   * @throws IOException
+   */
+  @Test
+  public void testFsckMisPlacedReplicas() throws IOException {
+    // Desired replication factor
+    final short REPL_FACTOR = 2;
+    // Number of replicas to actually start
+    short NUM_DN = 2;
+    // Number of blocks to write
+    final short NUM_BLOCKS = 3;
+    // Set a small-ish blocksize
+    final long blockSize = 512;
+
+    String [] racks = {"/rack1", "/rack1"};
+    String [] hosts = {"host1", "host2"};
+
+    Configuration conf = new Configuration();
+    conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, blockSize);
+
+    MiniDFSCluster cluster = null;
+    DistributedFileSystem dfs = null;
+
+    try {
+      // Startup a minicluster
+      cluster =
+          new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DN).hosts(hosts)
+          .racks(racks).build();
+      assertNotNull("Failed Cluster Creation", cluster);
+      cluster.waitClusterUp();
+      dfs = (DistributedFileSystem) cluster.getFileSystem();
+      assertNotNull("Failed to get FileSystem", dfs);
+
+      // Create a file that will be intentionally under-replicated
+      final String pathString = new String("/testfile");
+      final Path path = new Path(pathString);
+      long fileLen = blockSize * NUM_BLOCKS;
+      DFSTestUtil.createFile(dfs, path, fileLen, REPL_FACTOR, 1);
+
+      // Create an under-replicated file
+      NameNode namenode = cluster.getNameNode();
+      NetworkTopology nettop = cluster.getNamesystem().getBlockManager()
+          .getDatanodeManager().getNetworkTopology();
+      // Add a new node on different rack, so previous blocks' replicas
+      // are considered to be misplaced
+      nettop.add(DFSTestUtil.getDatanodeDescriptor("/rack2", "/host3"));
+      NUM_DN++;
+
+      Map<String,String[]> pmap = new HashMap<String, String[]>();
+      Writer result = new StringWriter();
+      PrintWriter out = new PrintWriter(result, true);
+      InetAddress remoteAddress = InetAddress.getLocalHost();
+      NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
+          NUM_DN, (short)REPL_FACTOR, remoteAddress);
+
+      // Run the fsck and check the Result
+      final HdfsFileStatus file =
+          namenode.getRpcServer().getFileInfo(pathString);
+      assertNotNull(file);
+      Result res = new Result(conf);
+      fsck.check(pathString, file, res);
+      // check misReplicatedBlock number.
+      assertEquals(res.numMisReplicatedBlocks, NUM_BLOCKS);
+    } finally {
+      if(dfs != null) {
+        dfs.close();
+      }
+      if(cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
 
   /** Test fsck with FileNotFound */
   @Test