Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fe9d1ee96 -> c1df6f3b4


Revert HDFS-6940.

Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c1df6f3b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c1df6f3b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c1df6f3b

Branch: refs/heads/branch-2
Commit: c1df6f3b4027153a9fa12916bfda50d5e8b996ac
Parents: fe9d1ee
Author: Konstantin V Shvachko <s...@apache.org>
Authored: Tue Sep 9 17:34:25 2014 -0700
Committer: Konstantin V Shvachko <s...@apache.org>
Committed: Tue Sep 9 17:34:25 2014 -0700

----------------------------------------------------------------------
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt     |  2 -
 .../server/blockmanagement/BlockManager.java    | 23 ++--------
 .../server/blockmanagement/DatanodeManager.java |  6 +--
 .../server/blockmanagement/HostFileManager.java |  4 --
 .../hdfs/server/namenode/FSNamesystem.java      | 46 +++++++++-----------
 .../hdfs/server/namenode/NameNodeAdapter.java   |  2 +-
 6 files changed, 26 insertions(+), 57 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1df6f3b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index cdb7353..6171a39 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -188,8 +188,6 @@ Release 2.6.0 - UNRELEASED
     HDFS-6376. Distcp data between two HA clusters requires another 
configuration.
     (Dave Marion and Haohui Mai via jing9)
 
-    HDFS-6940. Refactoring to allow ConsensusNode implementation. (shv)
-
     HDFS-6943. Improve NN allocateBlock log to include replicas' datanode IPs.
     (Ming Ma via wheat9)
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1df6f3b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index de02de1..5d23c1f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -164,7 +164,7 @@ public class BlockManager {
   final BlocksMap blocksMap;
 
   /** Replication thread. */
-  Daemon replicationThread;
+  final Daemon replicationThread = new Daemon(new ReplicationMonitor());
   
   /** Store blocks -> datanodedescriptor(s) map of corrupt replicas */
   final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
@@ -263,7 +263,6 @@ public class BlockManager {
     this.namesystem = namesystem;
     datanodeManager = new DatanodeManager(this, namesystem, conf);
     heartbeatManager = datanodeManager.getHeartbeatManager();
-    setReplicationMonitor(new ReplicationMonitor());
 
     final long pendingPeriod = conf.getLong(
         DFSConfigKeys.DFS_NAMENODE_STARTUP_DELAY_BLOCK_DELETION_SEC_KEY,
@@ -395,23 +394,7 @@ public class BlockManager {
           lifetimeMin*60*1000L, 0, null, encryptionAlgorithm);
     }
   }
-
-  public long getReplicationRecheckInterval() {
-    return replicationRecheckInterval;
-  }
-
-  public AtomicLong excessBlocksCount() {
-    return excessBlocksCount;
-  }
-
-  public void clearInvalidateBlocks() {
-    invalidateBlocks.clear();
-  }
-
-  void setReplicationMonitor(Runnable replicationMonitor) {
-    replicationThread = new Daemon(replicationMonitor);
-  }
-
+  
   public void setBlockPoolId(String blockPoolId) {
     if (isBlockTokenEnabled()) {
       blockTokenSecretManager.setBlockPoolId(blockPoolId);
@@ -1636,7 +1619,7 @@ public class BlockManager {
    * If there were any replication requests that timed out, reap them
    * and put them back into the neededReplication queue
    */
-  void processPendingReplications() {
+  private void processPendingReplications() {
     Block[] timedOutItems = pendingReplications.getTimedOutBlocks();
     if (timedOutItems != null) {
       namesystem.writeLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1df6f3b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index dcf6a0a..ed257a3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1052,7 +1052,7 @@ public class DatanodeManager {
    * 3. Added to exclude --> start decommission.
    * 4. Removed from exclude --> stop decommission.
    */
-  void refreshDatanodes() {
+  private void refreshDatanodes() {
     for(DatanodeDescriptor node : datanodeMap.values()) {
       // Check if not include.
       if (!hostFileManager.isIncluded(node)) {
@@ -1585,9 +1585,5 @@ public class DatanodeManager {
   public void setShouldSendCachingCommands(boolean shouldSendCachingCommands) {
     this.shouldSendCachingCommands = shouldSendCachingCommands;
   }
-
-  public HostFileManager getHostFileManager() {
-    return this.hostFileManager;
-  }
 }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1df6f3b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
index 7db23e4..0b8d6c5 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/HostFileManager.java
@@ -129,10 +129,6 @@ class HostFileManager {
   void refresh(String includeFile, String excludeFile) throws IOException {
     HostSet newIncludes = readFile("included", includeFile);
     HostSet newExcludes = readFile("excluded", excludeFile);
-    setHosts(newIncludes, newExcludes);
-  }
-
-  void setHosts(HostSet newIncludes, HostSet newExcludes) {
     synchronized (this) {
       includes = newIncludes;
       excludes = newExcludes;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1df6f3b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 34f5f05..fa77526 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -982,7 +982,7 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
     return Collections.unmodifiableList(auditLoggers);
   }
 
-  protected void loadFSImage(StartupOption startOpt) throws IOException {
+  private void loadFSImage(StartupOption startOpt) throws IOException {
     final FSImage fsImage = getFSImage();
 
     // format before starting up if requested
@@ -1030,7 +1030,7 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
     imageLoadComplete();
   }
 
-  protected void startSecretManager() {
+  private void startSecretManager() {
     if (dtSecretManager != null) {
       try {
         dtSecretManager.startThreads();
@@ -1042,7 +1042,7 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
     }
   }
   
-  protected void startSecretManagerIfNecessary() {
+  private void startSecretManagerIfNecessary() {
     boolean shouldRun = shouldUseDelegationTokens() &&
       !isInSafeMode() && getEditLog().isOpenForWrite();
     boolean running = dtSecretManager.isRunning();
@@ -1192,7 +1192,7 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
     return haEnabled && inActiveState() && startingActiveService;
   }
 
-  protected boolean shouldUseDelegationTokens() {
+  private boolean shouldUseDelegationTokens() {
     return UserGroupInformation.isSecurityEnabled() ||
       alwaysUseDelegationTokensForTests;
   }
@@ -2736,7 +2736,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
    * @throws UnresolvedLinkException
    * @throws IOException
    */
-  protected
   LocatedBlock prepareFileForWrite(String src, INodeFile file,
                                    String leaseHolder, String clientMachine,
                                    boolean writeToEditLog,
@@ -3199,7 +3198,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
     return new FileState(pendingFile, src);
   }
 
-  protected
   LocatedBlock makeLocatedBlock(Block blk, DatanodeStorageInfo[] locs,
                                         long offset) throws IOException {
     LocatedBlock lBlk = new LocatedBlock(
@@ -3317,8 +3315,8 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
     return true;
   }
 
-  protected INodeFile checkLease(String src, String holder, INode inode,
-                                 long fileId)
+  private INodeFile checkLease(String src, String holder, INode inode,
+                               long fileId)
       throws LeaseExpiredException, FileNotFoundException {
     assert hasReadLock();
     final String ident = src + " (inode " + fileId + ")";
@@ -4435,7 +4433,7 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
     return leaseManager.reassignLease(lease, src, newHolder);
   }
 
-  protected void commitOrCompleteLastBlock(final INodeFile fileINode,
+  private void commitOrCompleteLastBlock(final INodeFile fileINode,
       final Block commitBlock) throws IOException {
     assert hasWriteLock();
     Preconditions.checkArgument(fileINode.isUnderConstruction());
@@ -4831,7 +4829,6 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
    * @return an array of datanode commands 
    * @throws IOException
    */
-  protected
   HeartbeatResponse handleHeartbeat(DatanodeRegistration nodeReg,
       StorageReport[] reports, long cacheCapacity, long cacheUsed,
       int xceiverCount, int xmitsInProgress, int failedVolumes)
@@ -4881,8 +4878,8 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
    * @param file
    * @param logRetryCache
    */
-  protected void persistBlocks(String path, INodeFile file,
-                               boolean logRetryCache) {
+  private void persistBlocks(String path, INodeFile file,
+                             boolean logRetryCache) {
     assert hasWriteLock();
     Preconditions.checkArgument(file.isUnderConstruction());
     getEditLog().logUpdateBlocks(path, file, logRetryCache);
@@ -5313,7 +5310,7 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
    * @param path
    * @param file
    */
-  protected void persistNewBlock(String path, INodeFile file) {
+  private void persistNewBlock(String path, INodeFile file) {
     Preconditions.checkArgument(file.isUnderConstruction());
     getEditLog().logAddBlock(path, file);
     if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -7191,7 +7188,7 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
    * 
    * @return true if delegation token operation is allowed
    */
-  protected boolean isAllowedDelegationTokenOp() throws IOException {
+  private boolean isAllowedDelegationTokenOp() throws IOException {
     AuthenticationMethod authMethod = getConnectionAuthenticationMethod();
     if (UserGroupInformation.isSecurityEnabled()
         && (authMethod != AuthenticationMethod.KERBEROS)
@@ -7358,13 +7355,7 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
     final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
     blockManager.getDatanodeManager().fetchDatanodes(live, null, true);
     for (DatanodeDescriptor node : live) {
-      info.put(node.getHostName(), getLiveNodeInfo(node));
-    }
-    return JSON.toString(info);
-  }
-
-  protected Map<String, Object> getLiveNodeInfo(DatanodeDescriptor node) {
-    return ImmutableMap.<String, Object>builder()
+      Map<String, Object> innerinfo = ImmutableMap.<String, Object>builder()
           .put("infoAddr", node.getInfoAddr())
           .put("infoSecureAddr", node.getInfoSecureAddr())
           .put("xferaddr", node.getXferAddr())
@@ -7382,6 +7373,10 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
           .put("blockPoolUsedPercent", node.getBlockPoolUsedPercent())
           .put("volfails", node.getVolumeFailures())
           .build();
+
+      info.put(node.getHostName(), innerinfo);
+    }
+    return JSON.toString(info);
   }
 
   /**
@@ -7666,16 +7661,17 @@ public class FSNamesystem implements Namesystem, 
FSClusterStats,
   public ReentrantLock getLongReadLockForTests() {
     return fsLock.longReadLock;
   }
+
+  @VisibleForTesting
+  public SafeModeInfo getSafeModeInfoForTests() {
+    return safeMode;
+  }
   
   @VisibleForTesting
   public void setNNResourceChecker(NameNodeResourceChecker nnResourceChecker) {
     this.nnResourceChecker = nnResourceChecker;
   }
 
-  public SafeModeInfo getSafeModeInfo() {
-    return safeMode;
-  }
-
   @Override
   public boolean isAvoidingStaleDataNodesForWrite() {
     return this.blockManager.getDatanodeManager()

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c1df6f3b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
----------------------------------------------------------------------
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index d65d1ff..c32ed67 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -223,7 +223,7 @@ public class NameNodeAdapter {
    * if safemode is not running.
    */
   public static int getSafeModeSafeBlocks(NameNode nn) {
-    SafeModeInfo smi = nn.getNamesystem().getSafeModeInfo();
+    SafeModeInfo smi = nn.getNamesystem().getSafeModeInfoForTests();
     if (smi == null) {
       return -1;
     }

Reply via email to the mailing list.