HDFS-13808: [SPS]: Remove unwanted FSNamesystem #isFileOpenedForWrite() and #getFileInfo() functions. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ac07b72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ac07b72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ac07b72

Branch: refs/heads/HDFS-10285
Commit: 3ac07b720b7839a7fe6c83f4ccfe319b6a892501
Parents: 39ed3a6
Author: Uma Maheswara Rao Gangumalla <umamah...@apache.org>
Authored: Sat Aug 11 23:22:59 2018 -0700
Committer: Uma Maheswara Rao Gangumalla <umamah...@apache.org>
Committed: Sun Aug 12 03:06:07 2018 -0700

----------------------------------------------------------------------
 .../router/RouterNamenodeProtocol.java          |  1 +
 .../server/blockmanagement/BlockManager.java    | 34 ------------
 .../blockmanagement/DatanodeDescriptor.java     |  2 +-
 .../server/blockmanagement/DatanodeManager.java | 17 ------
 .../hdfs/server/datanode/BPServiceActor.java    | 16 ------
 .../hdfs/server/namenode/FSNamesystem.java      | 38 -------------
 .../hadoop/hdfs/server/namenode/Namesystem.java | 22 --------
 .../sps/BlockStorageMovementNeeded.java         | 18 ++-----
 .../hdfs/server/namenode/sps/Context.java       | 28 ----------
 .../hdfs/server/namenode/sps/SPSService.java    |  5 +-
 .../namenode/sps/StoragePolicySatisfier.java    | 19 +++----
 .../hdfs/server/sps/ExternalSPSContext.java     | 57 +++++---------------
 .../sps/ExternalStoragePolicySatisfier.java     |  2 +-
 .../src/site/markdown/ArchivalStorage.md        |  2 +-
 .../TestPersistentStoragePolicySatisfier.java   | 10 +++-
 ...stStoragePolicySatisfierWithStripedFile.java |  2 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  4 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |  2 +-
 18 files changed, 39 insertions(+), 240 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
index edfb391..bf0db6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
@@ -187,6 +187,7 @@ public class RouterNamenodeProtocol implements NamenodeProtocol {
 
   @Override
   public Long getNextSPSPath() throws IOException {
+    rpcServer.checkOperation(OperationCategory.READ, false);
     // not supported
     return null;
   }
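
The added line follows the usual RBF convention for protocol methods the
Router does not implement: RouterRpcServer#checkOperation(OperationCategory,
boolean) first validates the Router state, and the false second argument
flags the operation as unsupported (in RBF this normally surfaces as an
UnsupportedOperationException, so the trailing return mostly satisfies the
compiler). A minimal sketch of the pattern, using a hypothetical method name:

    @Override
    public Long someUnsupportedCall() throws IOException {
      // Validate Router state for READ; 'false' marks the call unsupported.
      rpcServer.checkOperation(OperationCategory.READ, false);
      // Not supported by the Router, mirroring getNextSPSPath() above.
      return null;
    }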

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 87bd155..d8a3aa3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4300,21 +4300,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Check file has low redundancy blocks.
-   */
-  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
-    boolean result = false;
-    for (BlockInfo block : bc.getBlocks()) {
-      short expected = getExpectedRedundancyNum(block);
-      final NumberReplicas n = countNodes(block);
-      if (expected > n.liveReplicas()) {
-        result = true;
-      }
-    }
-    return result;
-  }
-
-  /**
    * Check sufficient redundancy of the blocks in the collection. If any block
    * is needed reconstruction, insert it into the reconstruction queue.
    * Otherwise, if the block is more than the expected replication factor,
@@ -5011,25 +4996,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Check whether file id has low redundancy blocks.
-   *
-   * @param inodeID
-   *          - inode id
-   */
-  public boolean hasLowRedundancyBlocks(long inodeID) {
-    namesystem.readLock();
-    try {
-      BlockCollection bc = namesystem.getBlockCollection(inodeID);
-      if (bc == null) {
-        return false;
-      }
-      return hasLowRedundancyBlocks(bc);
-    } finally {
-      namesystem.readUnlock();
-    }
-  }
-
-  /**
   * Create SPS manager instance. It manages the user invoked sps paths and does
    * the movement.
    *
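
Both removed hasLowRedundancyBlocks variants compared each block's live
replica count against its expected redundancy under the namesystem read
lock. A caller outside the namenode can approximate the same test from
client-visible block locations; a rough sketch, assuming a LocatedBlocks
already fetched for the file (illustrative only, not the exact replacement
used by SPS):

    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

    /** Approximate low-redundancy check from client-side locations. */
    static boolean looksLowRedundancy(LocatedBlocks blocks, short expected) {
      for (LocatedBlock lb : blocks.getLocatedBlocks()) {
        // Fewer reported replica locations than expected suggests the block
        // is under-replicated; return early (the removed helper kept
        // scanning even after finding one such block).
        if (lb.getLocations().length < expected) {
          return true;
        }
      }
      return false;
    }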

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index 9c96f16..12b5c33 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -793,7 +793,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   /** Increment the number of blocks scheduled. */
-  public void incrementBlocksScheduled(StorageType t) {
+  void incrementBlocksScheduled(StorageType t) {
     currApproxBlocksScheduled.add(t, 1);
   }
   

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index 4173f48..7d5d73c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1988,22 +1988,5 @@ public class DatanodeManager {
     }
     return reports;
   }
-
-  public boolean verifyTargetDatanodeHasSpaceForScheduling(DatanodeInfo dn,
-      StorageType type, long estimatedSize) {
-    namesystem.readLock();
-    try {
-      DatanodeDescriptor datanode =
-          blockManager.getDatanodeManager().getDatanode(dn.getDatanodeUuid());
-      if (datanode == null) {
-        LOG.debug("Target datanode: " + dn + " doesn't exists");
-        return false;
-      }
-      return null != datanode.chooseStorage4Block(type, estimatedSize);
-    } finally {
-      namesystem.readUnlock();
-    }
-  }
-
 }
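
The removed verifyTargetDatanodeHasSpaceForScheduling resolved the
DatanodeDescriptor and consulted chooseStorage4Block under the namesystem
lock. Outside the namenode, a similar but looser question can be answered
from the storage reports the SPS Context already exposes; a sketch, with the
caveat that a plain remaining-space comparison ignores the scheduled-block
accounting the internal check performed:

    import org.apache.hadoop.fs.StorageType;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
    import org.apache.hadoop.hdfs.server.protocol.StorageReport;

    /** Approximate space check for one datanode and storage type. */
    static boolean hasSpaceForBlock(DatanodeStorageReport node,
        StorageType type, long estimatedSize) {
      for (StorageReport r : node.getStorageReports()) {
        // At least one storage of the requested type with enough room.
        if (r.getStorage().getStorageType() == type
            && r.getRemaining() >= estimatedSize) {
          return true;
        }
      }
      return false;
    }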
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
index dab8ae9..f09ff66 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPServiceActor.java
@@ -39,7 +39,6 @@ import java.util.concurrent.atomic.AtomicBoolean;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
 import org.apache.hadoop.hdfs.client.BlockReportOptions;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -51,7 +50,6 @@ import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
 import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
@@ -534,20 +532,6 @@ class BPServiceActor implements Runnable {
     return response;
   }
 
-  private BlocksStorageMoveAttemptFinished getStorageMoveAttemptFinishedBlocks(
-      List<Block> finishedBlks) {
-
-    if (finishedBlks.isEmpty()) {
-      return null;
-    }
-
-    // Create BlocksStorageMoveAttemptFinished with currently finished
-    // blocks
-    Block[] blockList = new Block[finishedBlks.size()];
-    finishedBlks.toArray(blockList);
-    return new BlocksStorageMoveAttemptFinished(blockList);
-  }
-
   @VisibleForTesting
   void sendLifelineForTests() throws IOException {
     lifelineSender.sendLifeline();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index 7bc9ecc..ecf7fce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3139,29 +3139,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    * @param src The string representation of the path to the file
    * @param resolveLink whether to throw UnresolvedLinkException
    *        if src refers to a symlink
-   * @param needLocation if blockLocations need to be returned
-   *
-   * @throws AccessControlException
-   *           if access is denied
-   * @throws UnresolvedLinkException
-   *           if a symlink is encountered.
-   *
-   * @return object containing information regarding the file or null if file
-   *         not found
-   * @throws StandbyException
-   */
-  @Override
-  public HdfsFileStatus getFileInfo(final String src, boolean resolveLink,
-      boolean needLocation) throws IOException {
-    return getFileInfo(src, resolveLink, needLocation, false);
-  }
-
-  /**
-   * Get the file info for a specific file.
-   *
-   * @param src The string representation of the path to the file
-   * @param resolveLink whether to throw UnresolvedLinkException
-   *        if src refers to a symlink
    *
    * @param needLocation Include {@link LocatedBlocks} in result.
    * @param needBlockToken Include block tokens in {@link LocatedBlocks}
@@ -3604,21 +3581,6 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   }
 
   @Override
-  public boolean isFileOpenedForWrite(String path) {
-    readLock();
-    try {
-      INode inode = dir.getINode(path, FSDirectory.DirOp.READ);
-      INodeFile iNodeFile = INodeFile.valueOf(inode, path);
-      LeaseManager.Lease lease = leaseManager.getLease(iNodeFile);
-      return lease != null;
-    } catch (IOException e) {
-      return false;
-    } finally {
-      readUnlock();
-    }
-  }
-
-  @Override
   public boolean isInSnapshot(long blockCollectionID) {
     assert hasReadLock();
     final INodeFile bc = getBlockCollection(blockCollectionID);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index 82af4d2..2a52587 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -20,7 +20,6 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.util.RwLock;
@@ -50,31 +49,10 @@ public interface Namesystem extends RwLock, SafeMode {
   boolean inTransitionToActive();
 
   /**
-   * Check if file is been opened for write purpose.
-   * @param filePath
-   * @return true if valid write lease exists, otherwise return false.
-   */
-  boolean isFileOpenedForWrite(String filePath);
-
-  /**
    * Remove xAttr from the inode.
    * @param id
    * @param xattrName
    * @throws IOException
    */
   void removeXattr(long id, String xattrName) throws IOException;
-
-  /**
-   * Gets the fileInfo of the given file path.
-   *
-   * @param filePath string representation of the path to the file
-   * @param resolveLink whether to throw UnresolvedLinkException
-   *        if src refers to a symlink
-   * @param needLocation if blockLocations need to be returned
-   *
-   * @return hdfs file status details
-   * @throws IOException
-   */
-  HdfsFileStatus getFileInfo(String filePath, boolean resolveLink,
-      boolean needLocation) throws IOException;
 }
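
With getFileInfo and isFileOpenedForWrite dropped from the interface, the
external SPS obtains file metadata through the HDFS client instead, as the
ExternalSPSContext hunks below show. A minimal client-side sketch, assuming
dfs is an already-connected DistributedFileSystem:

    // getFileStatus over RPC; DistributedFileSystem hands back an
    // HdfsFileStatus, whose file id is what SPS tracking works with.
    HdfsFileStatus status =
        (HdfsFileStatus) dfs.getFileStatus(new Path("/hypothetical/file"));
    long inodeId = status.getFileId();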

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index b990bc5..02b9cff 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode.sps;
 
 import java.io.IOException;
 import java.util.HashMap;
-import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
@@ -79,7 +78,9 @@ public class BlockStorageMovementNeeded {
    *          - track info for satisfy the policy
    */
   public synchronized void add(ItemInfo trackInfo) {
-    storageMovementNeeded.add(trackInfo);
+    if (trackInfo != null) {
+      storageMovementNeeded.add(trackInfo);
+    }
   }
 
   /**
@@ -153,7 +154,6 @@ public class BlockStorageMovementNeeded {
   }
 
   public synchronized void clearAll() {
-    ctxt.removeAllSPSPathIds();
     storageMovementNeeded.clear();
     pendingWorkForDirectory.clear();
   }
@@ -188,18 +188,6 @@ public class BlockStorageMovementNeeded {
     }
   }
 
-  public synchronized void clearQueue(long trackId) {
-    ctxt.removeSPSPathId(trackId);
-    Iterator<ItemInfo> iterator = storageMovementNeeded.iterator();
-    while (iterator.hasNext()) {
-      ItemInfo next = iterator.next();
-      if (next.getFile() == trackId) {
-        iterator.remove();
-      }
-    }
-    pendingWorkForDirectory.remove(trackId);
-  }
-
   /**
    * Clean all the movements in spsDirsToBeTraveresed/storageMovementNeeded
    * and notify to clean up required resources.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java
index afa5a50..b27294c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/Context.java
@@ -22,8 +22,6 @@ import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -31,7 +29,6 @@ import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.Datanod
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.AccessControlException;
 
 /**
  * An interface for the communication between SPS and Namenode module.
@@ -51,21 +48,6 @@ public interface Context {
   boolean isInSafeMode();
 
   /**
-   * Returns true if Mover tool is already running, false otherwise.
-   */
-  boolean isMoverRunning();
-
-  /**
-   * Gets the Inode ID number for the given path.
-   *
-   * @param path
-   *          - file/dir path
-   * @return Inode id number
-   */
-  long getFileID(String path) throws UnresolvedLinkException,
-      AccessControlException, ParentNotDirectoryException;
-
-  /**
    * Gets the network topology.
    *
    * @param datanodeMap
@@ -132,16 +114,6 @@ public interface Context {
   Long getNextSPSPath();
 
   /**
-   * Removes the SPS path id.
-   */
-  void removeSPSPathId(long pathId);
-
-  /**
-   * Removes all SPS path ids.
-   */
-  void removeAllSPSPathIds();
-
-  /**
    * Do scan and collects the files under that directory and adds to the given
    * BlockStorageMovementNeeded.
    *

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java
index a62dd93..a83d32a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/SPSService.java
@@ -47,12 +47,9 @@ public interface SPSService {
    * Starts the SPS service. Make sure to initialize the helper services before
    * invoking this method.
    *
-   * @param reconfigStart
-   *          - to indicate whether the SPS startup requested from
-   *          reconfiguration service
    * @param spsMode sps service mode
    */
-  void start(boolean reconfigStart, StoragePolicySatisfierMode spsMode);
+  void start(StoragePolicySatisfierMode spsMode);
 
   /**
    * Stops the SPS service gracefully. Timed wait to stop storage policy
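
Callers now pass only the mode, since the reconfiguration flag is gone. The
external startup sequence, repeated across ExternalStoragePolicySatisfier
and the updated tests below, reduces to:

    // conf and nnc (a NameNodeConnector) set up as in the tests below.
    StoragePolicySatisfier sps = new StoragePolicySatisfier(conf);
    ExternalSPSContext context = new ExternalSPSContext(sps, nnc);
    sps.init(context);
    sps.start(StoragePolicySatisfierMode.EXTERNAL);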

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java
index 7ebd23d..4c04b46 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/StoragePolicySatisfier.java
@@ -150,21 +150,14 @@ public class StoragePolicySatisfier implements SPSService, Runnable {
    * movements monitor for retry the attempts if needed.
    */
   @Override
-  public synchronized void start(boolean reconfigStart,
-      StoragePolicySatisfierMode serviceMode) {
+  public synchronized void start(StoragePolicySatisfierMode serviceMode) {
     if (serviceMode == StoragePolicySatisfierMode.NONE) {
       LOG.error("Can't start StoragePolicySatisfier for the given mode:{}",
           serviceMode);
       return;
     }
-    if (reconfigStart) {
-      LOG.info("Starting {} StoragePolicySatisfier, as admin requested to "
-          + "start it.", StringUtils.toLowerCase(serviceMode.toString()));
-    } else {
-      LOG.info("Starting {} StoragePolicySatisfier.",
-          StringUtils.toLowerCase(serviceMode.toString()));
-    }
-
+    LOG.info("Starting {} StoragePolicySatisfier.",
+        StringUtils.toLowerCase(serviceMode.toString()));
     isRunning = true;
     storagePolicySatisfierThread = new Daemon(this);
     storagePolicySatisfierThread.setName("StoragePolicySatisfier");
@@ -229,8 +222,8 @@ public class StoragePolicySatisfier implements SPSService, Runnable {
         }
         continue;
       }
+      ItemInfo itemInfo = null;
       try {
-        ItemInfo itemInfo = null;
         boolean retryItem = false;
         if (!ctxt.isInSafeMode()) {
           itemInfo = storageMovementNeeded.get();
@@ -324,12 +317,14 @@ public class StoragePolicySatisfier implements SPSService, Runnable {
           blockCount = 0L;
         }
         if (retryItem) {
-          // itemInfo.increRetryCount();
           this.storageMovementNeeded.add(itemInfo);
         }
       } catch (IOException e) {
         LOG.error("Exception during StoragePolicySatisfier execution - "
             + "will continue next cycle", e);
+        // Since it could not finish this item in previous iteration due to IOE,
+        // just try again.
+        this.storageMovementNeeded.add(itemInfo);
       } catch (Throwable t) {
         synchronized (this) {
           if (isRunning) {
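
The hunk above also hoists itemInfo out of the try block so the in-flight
item can be re-queued on IOException instead of being dropped. The shape of
that pattern in isolation, as a self-contained sketch (the real SPS also
bounds retries; note the max-retry-attempts key set in the test change
below):

    import java.io.IOException;
    import java.util.ArrayDeque;
    import java.util.Queue;

    public class RequeueSketch {
      // Hypothetical stand-in for the per-item SPS work that may throw.
      static void process(String item) throws IOException { /* ... */ }

      public static void main(String[] args) {
        Queue<String> pending = new ArrayDeque<>();
        pending.add("item-1");
        while (!pending.isEmpty()) {
          String item = pending.poll(); // declared outside try, like itemInfo
          try {
            process(item);
          } catch (IOException e) {
            pending.add(item); // could not finish this cycle; try again
          }
        }
      }
    }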

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java
index 3293035..8427e93 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSContext.java
@@ -24,10 +24,7 @@ import java.util.ArrayList;
 import java.util.List;
 
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -47,7 +44,6 @@ import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.Datanod
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.net.NetworkTopology;
-import org.apache.hadoop.security.AccessControlException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -93,35 +89,6 @@ public class ExternalSPSContext implements Context {
   }
 
   @Override
-  public boolean isMoverRunning() {
-    try {
-      FSDataOutputStream out = nnc.getDistributedFileSystem()
-          .append(HdfsServerConstants.MOVER_ID_PATH);
-      out.close();
-      return false;
-    } catch (IOException ioe) {
-      LOG.warn("Exception while checking mover is running..", ioe);
-      return true;
-    }
-
-  }
-
-  @Override
-  public long getFileID(String path) throws UnresolvedLinkException,
-      AccessControlException, ParentNotDirectoryException {
-    HdfsFileStatus fs = null;
-    try {
-      fs = (HdfsFileStatus) nnc.getDistributedFileSystem().getFileStatus(
-          new Path(path));
-      LOG.info("Fetched the fileID:{} for the path:{}", fs.getFileId(), path);
-    } catch (IllegalArgumentException | IOException e) {
-      LOG.warn("Exception while getting file is for the given path:{}.", path,
-          e);
-    }
-    return fs != null ? fs.getFileId() : 0;
-  }
-
-  @Override
   public NetworkTopology getNetworkTopology(DatanodeMap datanodeMap) {
     // create network topology.
     NetworkTopology cluster = NetworkTopology.getInstance(service.getConf());
@@ -152,8 +119,18 @@ public class ExternalSPSContext implements Context {
   @Override
   public void removeSPSHint(long inodeId) throws IOException {
     Path filePath = DFSUtilClient.makePathFromFileId(inodeId);
-    nnc.getDistributedFileSystem().removeXAttr(filePath,
-        HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY);
+    try {
+      nnc.getDistributedFileSystem().removeXAttr(filePath,
+          HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY);
+    } catch (IOException e) {
+      List<String> listXAttrs = nnc.getDistributedFileSystem()
+          .listXAttrs(filePath);
+      if (!listXAttrs
+          .contains(HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY)) {
+        LOG.info("SPS hint already removed for the inodeId:{}."
+            + " Ignoring exception:{}", inodeId, e.getMessage());
+      }
+    }
   }
 
   @Override
@@ -197,16 +174,6 @@ public class ExternalSPSContext implements Context {
   }
 
   @Override
-  public void removeSPSPathId(long pathId) {
-    // We need not specifically implement for external.
-  }
-
-  @Override
-  public void removeAllSPSPathIds() {
-    // We need not specifically implement for external.
-  }
-
-  @Override
   public void scanAndCollectFiles(long path)
       throws IOException, InterruptedException {
     fileCollector.scanAndCollectFiles(path);
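
The reworked removeSPSHint above tolerates a racing removal: when
removeXAttr fails but listXAttrs shows the hint xattr is already gone, the
exception is logged and ignored. A slightly stricter variant of the same
idea against the generic FileSystem xattr API (a sketch; unlike the code
above, it rethrows when the attribute is still present):

    import java.io.IOException;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    /** Remove an xattr, tolerating an "already removed" race. */
    static void removeXAttrIdempotently(FileSystem fs, Path p, String name)
        throws IOException {
      try {
        fs.removeXAttr(p, name);
      } catch (IOException e) {
        if (fs.listXAttrs(p).contains(name)) {
          throw e; // attribute still present: the failure was real
        }
        // Otherwise another actor removed it first; nothing left to do.
      }
    }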

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
index 8e19a7c..15cdc6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalStoragePolicySatisfier.java
@@ -70,7 +70,7 @@ public final class ExternalStoragePolicySatisfier {
 
       ExternalSPSContext context = new ExternalSPSContext(sps, nnc);
       sps.init(context);
-      sps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+      sps.start(StoragePolicySatisfierMode.EXTERNAL);
       if (sps != null) {
         sps.join();
       }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index 3789779..5fd6612 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -154,7 +154,7 @@ Note that, when both -p and -f options are omitted, the default path is the root
 
 ####Administrator notes:
 
-`StoragePolicySatisfier` and `Mover tool` cannot run simultaneously. If a Mover instance is already triggered and running, SPS will be disabled while starting. In that case, administrator should make sure, Mover execution finished and then enable(internal service inside NN or external service outside NN) SPS again. Similarly when SPS enabled already, Mover cannot be run. If administrator is looking to run Mover tool explicitly, then he/she should make sure to disable SPS first and then run Mover. Please look at the commands section to know how to enable(internal service inside NN or external service outside NN) or disable SPS dynamically.
+`StoragePolicySatisfier` and `Mover tool` cannot run simultaneously. If a Mover instance is already triggered and running, SPS will be disabled while starting. In that case, the administrator should make sure the Mover execution has finished and then enable the external SPS service again. Similarly, when SPS is already enabled, Mover cannot be run. If the administrator wants to run the Mover tool explicitly, he/she should disable SPS first and then run Mover. Please look at the commands section to know how to enable the external service outside the NN or disable SPS dynamically.
 
 Storage Policy Commands
 -----------------------

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 2ad8640..1ac9257 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -113,6 +113,8 @@ public class TestPersistentStoragePolicySatisfier {
     // Reduced refresh cycle to update latest datanodes.
     conf.setLong(DFSConfigKeys.DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS,
         1000);
+    conf.setInt(
+        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MAX_RETRY_ATTEMPTS_KEY, 20);
     final int dnNumber = storageTypes.length;
     final short replication = 3;
     MiniDFSCluster.Builder clusterBuilder = new MiniDFSCluster.Builder(conf)
@@ -136,7 +138,7 @@ public class TestPersistentStoragePolicySatisfier {
     ctxt = new ExternalSPSContext(sps, nnc);
 
     sps.init(ctxt);
-    sps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+    sps.start(StoragePolicySatisfierMode.EXTERNAL);
 
     createTestFiles(fs, replication);
   }
@@ -188,6 +190,7 @@ public class TestPersistentStoragePolicySatisfier {
    */
   @Test(timeout = 300000)
   public void testWithCheckpoint() throws Exception {
+    SecondaryNameNode secondary = null;
     try {
       clusterSetUp();
       fs.setStoragePolicy(testFile, WARM);
@@ -196,7 +199,7 @@ public class TestPersistentStoragePolicySatisfier {
       // Start the checkpoint.
       conf.set(
           DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY, "0.0.0.0:0");
-      SecondaryNameNode secondary = new SecondaryNameNode(conf);
+      secondary = new SecondaryNameNode(conf);
       secondary.doCheckpoint();
       restartCluster();
 
@@ -214,6 +217,9 @@ public class TestPersistentStoragePolicySatisfier {
           childFileName, StorageType.ARCHIVE, 3, timeout, fs);
 
     } finally {
+      if (secondary != null) {
+        secondary.shutdown();
+      }
       clusterShutdown();
     }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
index 250e54b..018a5dc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -510,7 +510,7 @@ public class TestStoragePolicySatisfierWithStripedFile {
     sps = new StoragePolicySatisfier(conf);
     ctxt = new ExternalSPSContext(sps, nnc);
     sps.init(ctxt);
-    sps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+    sps.start(StoragePolicySatisfierMode.EXTERNAL);
   }
 
   private static void initConfWithStripe(Configuration conf,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index d9a93fd..53878e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -219,7 +219,7 @@ public class TestExternalStoragePolicySatisfier {
     externalCtxt = new ExternalSPSContext(externalSps, nnc);
 
     externalSps.init(externalCtxt);
-    externalSps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+    externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
     return cluster;
   }
 
@@ -234,7 +234,7 @@ public class TestExternalStoragePolicySatisfier {
 
     externalCtxt = new ExternalSPSContext(externalSps, nnc);
     externalSps.init(externalCtxt);
-    externalSps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+    externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
   }
 
   private void initSecureConf(Configuration conf) throws Exception {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
----------------------------------------------------------------------
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
index 1ab7788..61fccfa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/TestStoragePolicySatisfyAdminCommands.java
@@ -71,7 +71,7 @@ public class TestStoragePolicySatisfyAdminCommands {
     Context externalCtxt = new ExternalSPSContext(externalSps, nnc);
 
     externalSps.init(externalCtxt);
-    externalSps.start(true, StoragePolicySatisfierMode.EXTERNAL);
+    externalSps.start(StoragePolicySatisfierMode.EXTERNAL);
   }
 
   @After

