Author: szetszwo
Date: Wed Jan 14 17:39:11 2009
New Revision: 734591

URL: http://svn.apache.org/viewvc?rev=734591&view=rev
Log:
HADOOP-5017. Change NameNode.namesystem declaration to private.  (szetszwo)
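
The pattern applied throughout: NameNode.namesystem becomes a private field, and every caller is switched from direct field access to the existing getNamesystem() accessor. A minimal sketch of the call-site change (the variable name ns is illustrative; NameNode, FSNamesystem, and getNamesystem() come from the diffs below):

    // before: direct access to the public field
    //   FSNamesystem ns = namenode.namesystem;
    // after: the field is private; callers go through the getter
    final FSNamesystem ns = namenode.getNamesystem();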

Modified:
    hadoop/core/trunk/CHANGES.txt
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
    hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSRename.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDecommission.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCorruption.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLease.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
    hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java

Modified: hadoop/core/trunk/CHANGES.txt
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/CHANGES.txt?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/CHANGES.txt (original)
+++ hadoop/core/trunk/CHANGES.txt Wed Jan 14 17:39:11 2009
@@ -32,6 +32,8 @@
     HADOOP-4985. Remove unnecessary "throw IOException" declarations in
     FSDirectory related methods.  (szetszwo)
 
+    HADOOP-5017. Change NameNode.namesystem declaration to private.  (szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java Wed Jan 14 17:39:11 2009
@@ -33,7 +33,6 @@
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ipc.RemoteException;
@@ -55,7 +54,7 @@
       final UserGroupInformation ugi = getUGI(request);
       final ServletContext context = getServletContext();
       final NameNode namenode = (NameNode)context.getAttribute("name.node");
-      final DatanodeID datanode = namenode.namesystem.getRandomDatanode();
+      final DatanodeID datanode = namenode.getNamesystem().getRandomDatanode();
       try {
        final URI uri = createRedirectUri("/getFileChecksum", ugi, datanode, request); 
         response.sendRedirect(uri.toURL().toString());

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java Wed Jan 14 17:39:11 2009
@@ -53,8 +53,9 @@
         UnixUserGroupInformation.UGI_PROPERTY_NAME, ugi);
 
     final NameNode nn = (NameNode) context.getAttribute("name.node");
-    final int totalDatanodes = nn.namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
-    final short minReplication = nn.namesystem.getMinReplication();
+    final FSNamesystem namesystem = nn.getNamesystem();
+    final int totalDatanodes = namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
+    final short minReplication = namesystem.getMinReplication();
 
     new NamenodeFsck(conf, nn, nn.getNetworkTopology(), pmap, out,
         totalDatanodes, minReplication).fsck();

Modified: hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java (original)
+++ hadoop/core/trunk/src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java Wed Jan 14 17:39:11 2009
@@ -121,7 +121,8 @@
 
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
 public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
-  public FSNamesystem namesystem; // TODO: This should private. Use getNamesystem() instead. 
+
+  private FSNamesystem namesystem; 
   /** RPC server */
   private Server server;
   /** RPC server address */
@@ -144,6 +145,9 @@
 
   static NameNodeMetrics myMetrics;
 
+  /** Return the {@link FSNamesystem} object.
+   * @return {@link FSNamesystem} object.
+   */
   public FSNamesystem getNamesystem() {
     return namesystem;
   }

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java Wed Jan 14 17:39:11 2009
@@ -508,6 +508,14 @@
   }
   
   /**
+   * Return the {@link FSNamesystem} object.
+   * @return {@link FSNamesystem} object.
+   */
+  public FSNamesystem getNamesystem() {
+    return nameNode.getNamesystem();
+  }
+
+  /**
    * Gets a list of the started DataNodes.  May be empty.
    */
   public ArrayList<DataNode> getDataNodes() {
@@ -814,8 +822,9 @@
    * Set the softLimit and hardLimit of client lease periods
    */
   void setLeasePeriod(long soft, long hard) {
-    nameNode.namesystem.leaseManager.setLeasePeriod(soft, hard);
-    nameNode.namesystem.lmthread.interrupt();
+    final FSNamesystem namesystem = nameNode.getNamesystem();
+    namesystem.leaseManager.setLeasePeriod(soft, hard);
+    namesystem.lmthread.interrupt();
   }
 
   /**

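The new MiniDFSCluster.getNamesystem() above lets tests drop the getNameNode() hop. A hypothetical JUnit fragment showing the intended usage (the class name TestNamesystemAccess and the single-datanode setup are illustrative, mirroring the tests in this commit):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

    public class TestNamesystemAccess extends junit.framework.TestCase {
      public void testNamesystemAccess() throws Exception {
        final MiniDFSCluster cluster =
          new MiniDFSCluster(new Configuration(), 1, true, null);
        try {
          cluster.waitActive();
          // replaces the old cluster.getNameNode().namesystem field access
          final FSNamesystem namesystem = cluster.getNamesystem();
          assertNotNull(namesystem);
        } finally {
          cluster.shutdown();
        }
      }
    }
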
Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java Wed Jan 14 17:39:11 2009
@@ -52,7 +52,7 @@
     ((DFSOutputStream)(out.getWrappedStream())).sync();
     
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
-    cluster.getNameNode().namesystem.DFSNodesStatus(dnList, dnList);
+    cluster.getNamesystem().DFSNodesStatus(dnList, dnList);
     DatanodeDescriptor dn = dnList.get(0);
     
     assertEquals(1, dn.getBlocksScheduled());

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSRename.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSRename.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSRename.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDFSRename.java Wed Jan 14 17:39:11 2009
@@ -26,7 +26,7 @@
 
 public class TestDFSRename extends junit.framework.TestCase {
   static int countLease(MiniDFSCluster cluster) {
-    return cluster.getNameNode().namesystem.leaseManager.countLease();
+    return cluster.getNamesystem().leaseManager.countLease();
   }
   
   final Path dir = new Path("/test/rename/");

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java Wed Jan 14 17:39:11 2009
@@ -282,12 +282,10 @@
     conf.setLong("dfs.replication.interval", 30);
     conf.setLong("dfs.heartbeat.interval", 30L);
     conf.setBoolean("dfs.replication.considerLoad", false);
-    Random random = new Random();
     FileSystem fs = null;
     DFSClient dfsClient = null;
     LocatedBlocks blocks = null;
     int replicaCount = 0;
-    int rand = random.nextInt(numDataNodes);
 
     MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
     cluster.waitActive();
@@ -330,7 +328,7 @@
      cluster.restartDataNode(corruptReplicasDNIDs[i]);
 
     // Loop until all corrupt replicas are reported
-    int corruptReplicaSize = cluster.getNameNode().namesystem.
+    int corruptReplicaSize = cluster.getNamesystem().
                               corruptReplicas.numCorruptReplicas(blk);
     while (corruptReplicaSize != numCorruptReplicas) {
       try {
@@ -344,7 +342,7 @@
         Thread.sleep(1000);
       } catch (InterruptedException ignore) {
       }
-      corruptReplicaSize = cluster.getNameNode().namesystem.
+      corruptReplicaSize = cluster.getNamesystem().
                               corruptReplicas.numCorruptReplicas(blk);
     }
     
@@ -365,7 +363,7 @@
 
     // Make sure the corrupt replica is invalidated and removed from
     // corruptReplicasMap
-    corruptReplicaSize = cluster.getNameNode().namesystem.
+    corruptReplicaSize = cluster.getNamesystem().
                           corruptReplicas.numCorruptReplicas(blk);
     while (corruptReplicaSize != 0 || replicaCount != numReplicas) {
       try {
@@ -373,7 +371,7 @@
         Thread.sleep(1000);
       } catch (InterruptedException ignore) {
       }
-      corruptReplicaSize = cluster.getNameNode().namesystem.
+      corruptReplicaSize = cluster.getNamesystem().
                             corruptReplicas.numCorruptReplicas(blk);
       blocks = dfsClient.namenode.
                  getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDecommission.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDecommission.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDecommission.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestDecommission.java Wed Jan 14 17:39:11 2009
@@ -183,7 +183,7 @@
     ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
     nodes.add(nodename);
     writeConfigFile(localFileSys, excludeFile, nodes);
-    namenode.namesystem.refreshNodes(conf);
+    namenode.getNamesystem().refreshNodes(conf);
     return nodename;
   }
 

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCorruption.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCorruption.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCorruption.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestFileCorruption.java Wed Jan 14 17:39:11 2009
@@ -18,15 +18,17 @@
 
 package org.apache.hadoop.hdfs;
 
-import java.io.*;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.File;
+import java.io.FileOutputStream;
 import java.util.ArrayList;
 
-import junit.framework.*;
+import junit.framework.TestCase;
+
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.BlockLocation;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -37,17 +39,6 @@
  * A JUnit test for corrupted file handling.
  */
 public class TestFileCorruption extends TestCase {
-  
-  public TestFileCorruption(String testName) {
-    super(testName);
-  }
-
-  protected void setUp() throws Exception {
-  }
-
-  protected void tearDown() throws Exception {
-  }
-  
   /** check if DFS can handle corrupted blocks properly */
   public void testFileCorruption() throws Exception {
     MiniDFSCluster cluster = null;
@@ -134,7 +125,7 @@
       DataNode dataNode = datanodes.get(2);
       
       // report corrupted block by the third datanode
-      cluster.getNameNode().namesystem.markBlockAsCorrupt(blk, 
+      cluster.getNamesystem().markBlockAsCorrupt(blk, 
           new DatanodeInfo(dataNode.dnRegistration ));
       
       // open the file

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLease.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLease.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLease.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLease.java Wed Jan 14 17:39:11 2009
@@ -25,7 +25,7 @@
 
 public class TestLease extends junit.framework.TestCase {
   static boolean hasLease(MiniDFSCluster cluster, Path src) {
-    return cluster.getNameNode().namesystem.leaseManager.getLeaseByPath(src.toString()) != null;
+    return cluster.getNamesystem().leaseManager.getLeaseByPath(src.toString()) != null;
   }
   
   final Path dir = new Path("/test/lease/");

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java Wed Jan 14 17:39:11 2009
@@ -123,7 +123,7 @@
 
       BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
       int minsize = min(newblocksizes);
-      long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
+      long currentGS = cluster.getNamesystem().getGenerationStamp();
       lastblock.setGenerationStamp(currentGS);
       for(int i = 0; i < REPLICATION_NUM; i++) {
         updatedmetainfo[i] = idps[i].getBlockMetaDataInfo(lastblock);

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java Wed Jan 14 17:39:11 2009
@@ -84,7 +84,7 @@
       DFSTestUtil.waitReplication(fs, fileName, (short)1);
 
       // get the block belonged to the created file
-      LocatedBlocks blocks = cluster.getNameNode().namesystem.getBlockLocations(
+      LocatedBlocks blocks = cluster.getNamesystem().getBlockLocations(
           fileName.toString(), 0, (long)fileLen);
       assertEquals(blocks.locatedBlockCount(), 1);
       LocatedBlock block = blocks.get(0);

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java Wed Jan 14 17:39:11 2009
@@ -1004,12 +1004,14 @@
     }
 
     void generateInputs(int[] ignore) throws IOException {
+      final FSNamesystem namesystem = nameNode.getNamesystem();
+
       // start data-nodes; create a bunch of files; generate block reports.
       blockReportObject.generateInputs(ignore);
       // stop replication monitor
-      nameNode.namesystem.replthread.interrupt();
+      namesystem.replthread.interrupt();
       try {
-        nameNode.namesystem.replthread.join();
+        namesystem.replthread.join();
       } catch(InterruptedException ei) {
         return;
       }
@@ -1021,7 +1023,7 @@
       // decommission data-nodes
       decommissionNodes();
       // set node replication limit
-      nameNode.namesystem.setNodeReplicationLimit(nodeReplicationLimit);
+      namesystem.setNodeReplicationLimit(nodeReplicationLimit);
     }
 
     private void decommissionNodes() throws IOException {
@@ -1052,7 +1054,7 @@
       assert daemonId < numThreads : "Wrong daemonId.";
       long start = System.currentTimeMillis();
       // compute data-node work
-      int work = nameNode.namesystem.computeDatanodeWork();
+      int work = nameNode.getNamesystem().computeDatanodeWork();
       long end = System.currentTimeMillis();
       numPendingBlocks += work;
       if(work == 0)

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java Wed Jan 14 17:39:11 2009
@@ -358,7 +358,7 @@
       DFSTestUtil.waitReplication(fs, filePath, (short)1);
       
       // intentionally corrupt NN data structure
-      INodeFile node = (INodeFile)cluster.getNameNode().namesystem.dir.rootDir.getNode(fileName);
+      INodeFile node = (INodeFile)cluster.getNamesystem().dir.rootDir.getNode(fileName);
       assertEquals(node.blocks.length, 1);
       node.blocks[0].setNumBytes(-1L);  // set the block length to be negative
       

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java Wed Jan 14 17:39:11 2009
@@ -55,7 +55,7 @@
       cluster = new MiniDFSCluster(conf, 1, true, null);
       cluster.waitActive();
       
-      FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      final FSNamesystem namesystem = cluster.getNamesystem();
       
       // Ensure the data reported for each data node is right
       ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java Wed Jan 14 17:39:11 2009
@@ -26,7 +26,7 @@
     final MiniDFSCluster cluster = 
       new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
     try {
-      final FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      final FSNamesystem namesystem = cluster.getNamesystem();
       final FileSystem fs = cluster.getFileSystem();
       
       // populate the cluster with a one block file

Modified: hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java
URL: http://svn.apache.org/viewvc/hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java?rev=734591&r1=734590&r2=734591&view=diff
==============================================================================
--- hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java (original)
+++ hadoop/core/trunk/src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java Wed Jan 14 17:39:11 2009
@@ -25,7 +25,7 @@
       
       // remove one replica from the blocksMap so block becomes under-replicated
       // but the block does not get put into the under-replicated blocks queue
-      FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      final FSNamesystem namesystem = cluster.getNamesystem();
       Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
       DatanodeDescriptor dn = namesystem.blocksMap.nodeIterator(b).next();
       namesystem.addToInvalidates(b, dn);

