[27/50] [abbrv] hadoop git commit: HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for external/internal implementations. Contributed by Uma Maheswara Rao G

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d4f74e7/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
index 2a7bde5..9354044 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -72,7 +72,6 @@ import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -147,12 +146,11 @@ public class TestStoragePolicySatisfier {
 startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
 storagesPerDatanode, capacity, hdfsCluster);
 
-dfs.satisfyStoragePolicy(new Path(file));
-
 hdfsCluster.triggerHeartbeats();
+dfs.satisfyStoragePolicy(new Path(file));
 // Wait till namenode notified about the block location details
-DFSTestUtil.waitExpectedStorageType(
-file, StorageType.ARCHIVE, 3, 30000, dfs);
+DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 35000,
+dfs);
   }
 
  @Test(timeout = 300000)
@@ -1284,6 +1282,7 @@ public class TestStoragePolicySatisfier {
 {StorageType.ARCHIVE, StorageType.SSD},
 {StorageType.DISK, StorageType.DISK}};
 config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+config.setInt(DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 10);
 hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
 storagesPerDatanode, capacity);
 dfs = hdfsCluster.getFileSystem();
@@ -1299,19 +1298,28 @@ public class TestStoragePolicySatisfier {
 
 //Queue limit can control the traverse logic to wait for some free
 //entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier sps = Mockito.mock(StoragePolicySatisfier.class);
-Mockito.when(sps.isRunning()).thenReturn(true);
-Context ctxt = Mockito.mock(Context.class);
-config.setInt(DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 10);
-Mockito.when(ctxt.getConf()).thenReturn(config);
-Mockito.when(ctxt.isRunning()).thenReturn(true);
-Mockito.when(ctxt.isInSafeMode()).thenReturn(false);
-Mockito.when(ctxt.isFileExist(Mockito.anyLong())).thenReturn(true);
-BlockStorageMovementNeeded movmentNeededQueue =
-new BlockStorageMovementNeeded(ctxt);
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(hdfsCluster.getNamesystem(),
+hdfsCluster.getNamesystem().getBlockManager(), sps) {
+  @Override
+  public boolean isInSafeMode() {
+return false;
+  }
+
+  @Override
+  public boolean isRunning() {
+return true;
+  }
+};
+
+FileIdCollector fileIDCollector =
+new IntraSPSNameNodeFileIdCollector(fsDir, sps);
+sps.init(ctxt, fileIDCollector, null);
+sps.getStorageMovementQueue().activate();
+
 INode rootINode = fsDir.getINode("/root");
-movmentNeededQueue.addToPendingDirQueue(rootINode.getId());
-movmentNeededQueue.init(fsDir);
+hdfsCluster.getNamesystem().getBlockManager()
+.addSPSPathId(rootINode.getId());
 
 //Wait for thread to reach U.
 Thread.sleep(1000);
@@ -1321,7 +1329,7 @@ public class TestStoragePolicySatisfier {
 // Remove 10 element and make queue free, So other traversing will start.
 for (int i = 0; i < 10; i++) {
   String path = expectedTraverseOrder.remove(0);
-  long trackId = movmentNeededQueue.get().getTrackId();
+  long trackId = sps.getStorageMovementQueue().get().getFileId();
   INode inode = fsDir.getInode(trackId);
   assertTrue("Failed to traverse tree, expected " + path + " but got "
   + inode.getFullPathName(), path.equals(inode.getFullPathName()));
@@ -1332,7 +1340,7 @@ public class TestStoragePolicySatisfier {
 // Check other element traversed in order and R,S should not be added in
 // queue which we already removed from expected list
 for (String path : expectedTraverseOrder) {
-  long trackId = movmentNeededQueue.get().getTrackId();
+  long trackId = sps.getStorageMovementQueue().get().getFileId();
   INode inode = fsDir.getInode(trackId);
   assertTrue("Failed to traverse tree, expected " + path + " but got "
   + inode.getFullPathName(), path.equals(inode.getFullPathName()));
@@ -1352,6 +1360,7 @@ public class 
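
Taken together, the hunks above replace the Mockito-driven setup with real objects behind the new seams. A condensed sketch of the resulting wiring, assuming a running MiniDFSCluster (hdfsCluster) and its FSDirectory (fsDir) as elsewhere in this test:

StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
// Context that always reports an active, out-of-safe-mode NameNode, so the
// satisfier keeps scheduling during the test.
Context ctxt = new IntraSPSNameNodeContext(hdfsCluster.getNamesystem(),
    hdfsCluster.getNamesystem().getBlockManager(), sps) {
  @Override
  public boolean isInSafeMode() {
    return false;
  }

  @Override
  public boolean isRunning() {
    return true;
  }
};
FileIdCollector fileIDCollector =
    new IntraSPSNameNodeFileIdCollector(fsDir, sps);
sps.init(ctxt, fileIDCollector, null); // third argument: block movement listener
sps.getStorageMovementQueue().activate();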

[34/50] [abbrv] hadoop git commit: HDFS-13057: [SPS]: Revisit configurations to make SPS service modes internal/external/none. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
index 0e3a5a3..2257608 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -103,8 +104,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -216,8 +217,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -328,8 +329,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 conf.set(DFSConfigKeys
 .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
 "3000");
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -420,8 +421,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 9a401bd..42b04da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.sps.BlockMovementListener;
@@ -54,12 +55,19 @@ public class TestExternalStoragePolicySatisfier
   new StorageType[][]{{StorageType.DISK, StorageType.DISK},
   {StorageType.DISK, StorageType.DISK},
   {StorageType.DISK, StorageType.DISK}};

[48/50] [abbrv] hadoop git commit: HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier file path. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/66e8f9b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
index b05717a..ec5307b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -108,8 +108,6 @@ public class TestStoragePolicySatisfier {
   public static final long CAPACITY = 2 * 256 * 1024 * 1024;
   public static final String FILE = "/testMoveToSatisfyStoragePolicy";
   public static final int DEFAULT_BLOCK_SIZE = 1024;
-  private ExternalBlockMovementListener blkMoveListener =
-  new ExternalBlockMovementListener();
 
   /**
* Sets hdfs cluster.
@@ -1282,8 +1280,8 @@ public class TestStoragePolicySatisfier {
 
 //Queue limit can control the traverse logic to wait for some free
 //entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier<Long> sps = new StoragePolicySatisfier<Long>(config);
-Context<Long> ctxt = new IntraSPSNameNodeContext(
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(
 hdfsCluster.getNamesystem(),
 hdfsCluster.getNamesystem().getBlockManager(), sps) {
   @Override
@@ -1297,8 +1295,7 @@ public class TestStoragePolicySatisfier {
   }
 };
 
-FileCollector<Long> fileIDCollector = createFileIdCollector(sps, ctxt);
-sps.init(ctxt, fileIDCollector, null, null);
+sps.init(ctxt);
 sps.getStorageMovementQueue().activate();
 
 INode rootINode = fsDir.getINode("/root");
@@ -1314,13 +1311,6 @@ public class TestStoragePolicySatisfier {
 dfs.delete(new Path("/root"), true);
   }
 
-  public FileCollector<Long> createFileIdCollector(
-  StoragePolicySatisfier<Long> sps, Context<Long> ctxt) {
-FileCollector<Long> fileIDCollector = new IntraSPSNameNodeFileIdCollector(
-hdfsCluster.getNamesystem().getFSDirectory(), sps);
-return fileIDCollector;
-  }
-
   /**
*  Test traverse when root parent got deleted.
*  1. Delete L when traversing Q
@@ -1351,8 +1341,8 @@ public class TestStoragePolicySatisfier {
 
 // Queue limit can control the traverse logic to wait for some free
 // entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier<Long> sps = new StoragePolicySatisfier<Long>(config);
-Context<Long> ctxt = new IntraSPSNameNodeContext(
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(
 hdfsCluster.getNamesystem(),
 hdfsCluster.getNamesystem().getBlockManager(), sps) {
   @Override
@@ -1365,8 +1355,7 @@ public class TestStoragePolicySatisfier {
 return true;
   }
 };
-FileCollector<Long> fileIDCollector = createFileIdCollector(sps, ctxt);
-sps.init(ctxt, fileIDCollector, null, null);
+sps.init(ctxt);
 sps.getStorageMovementQueue().activate();
 
 INode rootINode = fsDir.getINode("/root");
@@ -1383,12 +1372,12 @@ public class TestStoragePolicySatisfier {
   }
 
   private void assertTraversal(List<String> expectedTraverseOrder,
-  FSDirectory fsDir, StoragePolicySatisfier<Long> sps)
+  FSDirectory fsDir, StoragePolicySatisfier sps)
   throws InterruptedException {
 // Remove 10 element and make queue free, So other traversing will start.
 for (int i = 0; i < 10; i++) {
   String path = expectedTraverseOrder.remove(0);
-  ItemInfo<Long> itemInfo = sps.getStorageMovementQueue().get();
+  ItemInfo itemInfo = sps.getStorageMovementQueue().get();
   if (itemInfo == null) {
 continue;
   }
@@ -1403,7 +1392,7 @@ public class TestStoragePolicySatisfier {
 // Check other element traversed in order and E, M, U, R, S should not be
 // added in queue which we already removed from expected list
 for (String path : expectedTraverseOrder) {
-  ItemInfo<Long> itemInfo = sps.getStorageMovementQueue().get();
+  ItemInfo itemInfo = sps.getStorageMovementQueue().get();
   if (itemInfo == null) {
 continue;
   }
@@ -1717,17 +1706,17 @@ public class TestStoragePolicySatisfier {
   public void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
   int timeout) throws TimeoutException, InterruptedException {
 BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();
-final StoragePolicySatisfier<Long> sps =
-(StoragePolicySatisfier<Long>) blockManager.getSPSManager()
+final StoragePolicySatisfier sps =
+
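
For reference, the helper named in the subject builds a path from the inode id alone, so the satisfier can address a file without walking the directory tree. A sketch of the expected behavior; the signature is recalled from DFSUtilClient rather than shown in this diff, and the id 16386 is illustrative:

// "/.reserved/.inodes/<fileId>" resolves back to the file on the NameNode
// regardless of intervening renames or moves.
String spsPath = DFSUtilClient.makePathFromFileId(16386L);
// spsPath: "/.reserved/.inodes/16386"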

[46/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Addendum. Resolve conflicts after rebasing branch to trunk. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13076: [SPS]: Addendum. Resolve conflicts after rebasing branch to trunk. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfcb331b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfcb331b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfcb331b

Branch: refs/heads/trunk
Commit: dfcb331ba3516264398121c9f23af3a79c0509cc
Parents: db3f227
Author: Rakesh Radhakrishnan 
Authored: Fri Jul 20 10:59:16 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:06 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfcb331b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index a714602..21af33f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3624,8 +3624,8 @@ public class DataNode extends ReconfigurableBase
 }
 return this.diskBalancer;
   }
-}
 
   StoragePolicySatisfyWorker getStoragePolicySatisfyWorker() {
 return storagePolicySatisfyWorker;
-  }}
+  }
+}





[06/50] [abbrv] hadoop git commit: HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bcf61c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bcf61c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bcf61c6

Branch: refs/heads/trunk
Commit: 4bcf61c696909342f1a238f614d4471c4b6fbad0
Parents: 9e82e5a
Author: Uma Maheswara Rao G 
Authored: Mon Jul 17 10:24:06 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../hdfs/server/blockmanagement/BlockManager.java   |  2 +-
 .../server/datanode/StoragePolicySatisfyWorker.java |  6 +++---
 .../hdfs/server/namenode/StoragePolicySatisfier.java|  6 +++---
 .../hadoop/hdfs/server/protocol/DatanodeProtocol.java   |  5 ++---
 .../org/apache/hadoop/hdfs/server/mover/TestMover.java  |  7 ---
 .../server/namenode/TestStoragePolicySatisfier.java | 12 ++--
 6 files changed, 19 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bcf61c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 988067c..8b7abaa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -427,7 +427,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   private final BlockIdManager blockIdManager;
 
-  /** For satisfying block storage policies */
+  /** For satisfying block storage policies. */
   private final StoragePolicySatisfier sps;
   private final BlockStorageMovementNeeded storageMovementNeeded =
   new BlockStorageMovementNeeded();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bcf61c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index f4f97dd..196cd58 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -329,7 +329,7 @@ public class StoragePolicySatisfyWorker {
   /**
* Block movement status code.
*/
-  public static enum BlockMovementStatus {
+  public enum BlockMovementStatus {
 /** Success. */
 DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
 /**
@@ -343,7 +343,7 @@ public class StoragePolicySatisfyWorker {
 
 private final int code;
 
-private BlockMovementStatus(int code) {
+BlockMovementStatus(int code) {
   this.code = code;
 }
 
@@ -365,7 +365,7 @@ public class StoragePolicySatisfyWorker {
 private final DatanodeInfo target;
 private final BlockMovementStatus status;
 
-public BlockMovementResult(long trackId, long blockId,
+BlockMovementResult(long trackId, long blockId,
 DatanodeInfo target, BlockMovementStatus status) {
   this.trackId = trackId;
   this.blockId = blockId;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bcf61c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 00b4cd0..af3b7f2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -714,10 +714,10 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   private static class StorageTypeNodePair {
-public StorageType storageType = null;
-public DatanodeDescriptor dn = null;
+private StorageType storageType = null;
+private DatanodeDescriptor dn = null;

[30/50] [abbrv] hadoop git commit: HDFS-13075. [SPS]: Provide External Context implementation. Contributed by Uma Maheswara Rao G.

2018-08-12 Thread umamahesh
HDFS-13075. [SPS]: Provide External Context implementation. Contributed by Uma 
Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99594b48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99594b48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99594b48

Branch: refs/heads/trunk
Commit: 99594b48b8e040ab5a0939d7c3dbcfb34400e6fc
Parents: 3b83110
Author: Surendra Singh Lilhore 
Authored: Sun Jan 28 20:46:56 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:04 2018 -0700

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  67 +
 .../NamenodeProtocolTranslatorPB.java   |  58 
 .../hdfs/server/balancer/NameNodeConnector.java |  28 +-
 .../server/blockmanagement/BlockManager.java|  19 ++
 .../server/blockmanagement/DatanodeManager.java |  18 ++
 .../hdfs/server/common/HdfsServerConstants.java |   3 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  53 +++-
 .../sps/BlockStorageMovementNeeded.java |   8 +-
 .../hdfs/server/namenode/sps/Context.java   |   9 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |  23 +-
 .../namenode/sps/StoragePolicySatisfier.java|  15 +-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  46 +++-
 .../hdfs/server/sps/ExternalSPSContext.java | 271 +++
 .../src/main/proto/NamenodeProtocol.proto   |  57 
 .../sps/TestExternalStoragePolicySatisfier.java |  31 +--
 15 files changed, 652 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99594b48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index 90c2c49..25eafdf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -33,10 +35,16 @@ import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksReq
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto;
 import 
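
The new protos above imply matching NamenodeProtocol RPCs (getFilePath, getNextSPSPathId, checkDNSpace, hasLowRedundancyBlocks). A hypothetical miniature of the ExternalSPSContext idea, with illustrative stand-in names rather than Hadoop's API, showing how an out-of-NameNode context can answer the satisfier's questions over RPC:

import java.io.IOException;

interface SpsNamespaceView {
  boolean isFileExist(long inodeId) throws IOException; // inode still present?
  Long getNextSPSPathId() throws IOException;           // next queued path
}

final class ExternalSpsView implements SpsNamespaceView {
  interface NnRpc { // stand-in for the NamenodeProtocol additions
    String getFilePath(long inodeId) throws IOException;
    Long getNextSPSPathId() throws IOException;
  }

  private final NnRpc rpc;

  ExternalSpsView(NnRpc rpc) {
    this.rpc = rpc;
  }

  @Override
  public boolean isFileExist(long inodeId) throws IOException {
    return rpc.getFilePath(inodeId) != null; // a resolvable path implies existence
  }

  @Override
  public Long getNextSPSPathId() throws IOException {
    return rpc.getNextSPSPathId();
  }
}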

[03/50] [abbrv] hadoop git commit: HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e82e5a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e82e5a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e82e5a8

Branch: refs/heads/trunk
Commit: 9e82e5a86ea66b9d24d38b922ee5fa97b3391475
Parents: 68af4e1
Author: Rakesh Radhakrishnan 
Authored: Mon Jul 17 22:40:03 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../server/namenode/TestStoragePolicySatisfier.java |  9 +
 .../TestStoragePolicySatisfierWithStripedFile.java  | 16 
 2 files changed, 13 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e82e5a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index be7236b..10ceae7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -1025,12 +1025,13 @@ public class TestStoragePolicySatisfier {
   list.add(cluster.stopDataNode(0));
   list.add(cluster.stopDataNode(0));
   cluster.restartNameNodes();
-  cluster.restartDataNode(list.get(0), true);
-  cluster.restartDataNode(list.get(1), true);
+  cluster.restartDataNode(list.get(0), false);
+  cluster.restartDataNode(list.get(1), false);
   cluster.waitActive();
   fs.satisfyStoragePolicy(filePath);
-  Thread.sleep(3000 * 6);
-  cluster.restartDataNode(list.get(2), true);
+  DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+  StorageType.ARCHIVE, 2, 30000, cluster.getFileSystem());
+  cluster.restartDataNode(list.get(2), false);
   DFSTestUtil.waitExpectedStorageType(filePath.toString(),
   StorageType.ARCHIVE, 3, 30000, cluster.getFileSystem());
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e82e5a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index f905ead..c070113 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -308,8 +308,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
*/
  @Test(timeout = 300000)
   public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
-// start 10 datanodes
-int numOfDatanodes = 10;
+// start 9 datanodes
+int numOfDatanodes = 9;
 int storagesPerDatanode = 2;
 long capacity = 20 * defaultStripeBlockSize;
 long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@@ -338,7 +338,6 @@ public class TestStoragePolicySatisfierWithStripedFile {
 {StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE},
-{StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE}})
 .storageCapacities(capacities)
 .build();
@@ -366,15 +365,16 @@ public class TestStoragePolicySatisfierWithStripedFile {
   }
   cluster.restartNameNodes();
   // Restart half datanodes
-  for (int i = 0; i < numOfDatanodes / 2; i++) {
-cluster.restartDataNode(list.get(i), true);
+  for (int i = 0; i < 5; i++) {
+cluster.restartDataNode(list.get(i), false);
   }
   cluster.waitActive();
   fs.satisfyStoragePolicy(fooFile);
-  Thread.sleep(3000 * 6);
+  DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
+  StorageType.ARCHIVE, 5, 30000, cluster.getFileSystem());
   //Start reaming datanodes
-
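
Both hunks apply the same fix: replace a fixed Thread.sleep with a bounded poll on the expected replica placement, so the test neither flakes on slow machines nor wastes time on fast ones. The pattern, with the 30000 ms timeout assumed from the sibling calls:

fs.satisfyStoragePolicy(fooFile);
// Poll until 5 replicas reach ARCHIVE or 30 seconds elapse, instead of
// sleeping a fixed 18 seconds and hoping the movement already finished.
DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
    StorageType.ARCHIVE, 5, 30000, cluster.getFileSystem());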

[13/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00eceed2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 57e9f94..70219f6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -203,11 +203,11 @@ public class TestStoragePolicySatisfier {
   }
 
   /**
-   * Tests to verify that the block storage movement results will be propagated
+   * Tests to verify that the block storage movement report will be propagated
* to Namenode via datanode heartbeat.
*/
  @Test(timeout = 300000)
-  public void testPerTrackIdBlocksStorageMovementResults() throws Exception {
+  public void testBlksStorageMovementAttemptFinishedReport() throws Exception {
 try {
   createCluster();
   // Change policy to ONE_SSD
@@ -229,7 +229,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 2, 30000, dfs);
 
-  waitForBlocksMovementResult(1, 30000);
+  waitForBlocksMovementAttemptReport(1, 30000);
 } finally {
   shutdownCluster();
 }
@@ -276,7 +276,7 @@ public class TestStoragePolicySatisfier {
 fileName, StorageType.DISK, 2, 30000, dfs);
   }
 
-  waitForBlocksMovementResult(files.size(), 30000);
+  waitForBlocksMovementAttemptReport(files.size(), 30000);
 } finally {
   shutdownCluster();
 }
@@ -457,7 +457,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 2, 30000, dfs);
 
-  waitForBlocksMovementResult(1, 30000);
+  waitForBlocksMovementAttemptReport(1, 30000);
 } finally {
   shutdownCluster();
 }
@@ -630,7 +630,7 @@ public class TestStoragePolicySatisfier {
   // No block movement will be scheduled as there is no target node
   // available with the required storage type.
   waitForAttemptedItems(1, 30000);
-  waitForBlocksMovementResult(1, 30000);
+  waitForBlocksMovementAttemptReport(1, 30000);
   DFSTestUtil.waitExpectedStorageType(
   file1, StorageType.ARCHIVE, 1, 30000, dfs);
   DFSTestUtil.waitExpectedStorageType(
@@ -691,7 +691,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 3, 30000, dfs);
 
-  waitForBlocksMovementResult(1, 30000);
+  waitForBlocksMovementAttemptReport(1, 30000);
 } finally {
   shutdownCluster();
 }
@@ -871,7 +871,7 @@ public class TestStoragePolicySatisfier {
   Set<DatanodeDescriptor> dns = hdfsCluster.getNamesystem()
   .getBlockManager().getDatanodeManager().getDatanodes();
   for (DatanodeDescriptor dd : dns) {
-assertNull(dd.getBlocksToMoveStorages());
+assertNull(dd.getBlocksToMoveStorages(1));
   }
 
   // Enable heart beats now
@@ -1224,7 +1224,7 @@ public class TestStoragePolicySatisfier {
   /**
* Test SPS for batch processing.
*/
-  @Test(timeout = 300000)
+  @Test(timeout = 3000000)
   public void testBatchProcessingForSPSDirectory() throws Exception {
 try {
   StorageType[][] diskTypes = new StorageType[][] {
@@ -1252,7 +1252,7 @@ public class TestStoragePolicySatisfier {
 DFSTestUtil.waitExpectedStorageType(fileName, StorageType.ARCHIVE, 2,
 30000, dfs);
   }
-  waitForBlocksMovementResult(files.size(), 30000);
+  waitForBlocksMovementAttemptReport(files.size(), 30000);
   String expectedLogMessage = "StorageMovementNeeded queue remaining"
   + " capacity is zero";
   assertTrue("Log output does not contain expected log message: "
@@ -1268,7 +1268,7 @@ public class TestStoragePolicySatisfier {
*  1. Delete /root when traversing Q
*  2. U, R, S should not be in queued.
*/
-  @Test
+  @Test(timeout = 300000)
   public void testTraverseWhenParentDeleted() throws Exception {
 StorageType[][] diskTypes = new StorageType[][] {
 {StorageType.DISK, StorageType.ARCHIVE},
@@ -1330,7 +1330,7 @@ public class TestStoragePolicySatisfier {
*  1. Delete L when traversing Q
*  2. E, M, U, R, S should not be in queued.
*/
-  @Test
+  @Test(timeout = 300000)
   public void testTraverseWhenRootParentDeleted() throws Exception {
 StorageType[][] diskTypes = new StorageType[][] {
 {StorageType.DISK, StorageType.ARCHIVE},
@@ -1387,6 +1387,82 @@ public class TestStoragePolicySatisfier {
 dfs.delete(new Path("/root"), 

[09/50] [abbrv] hadoop git commit: HDFS-12556: [SPS] : Block movement analysis should be done in read lock.

2018-08-12 Thread umamahesh
HDFS-12556: [SPS] : Block movement analysis should be done in read lock.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5780f062
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5780f062
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5780f062

Branch: refs/heads/trunk
Commit: 5780f0624de2531194bc98eb25a928f7a483b992
Parents: 00eceed
Author: Surendra Singh Lilhore 
Authored: Sat Oct 14 15:11:26 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:01 2018 -0700

--
 .../server/namenode/StoragePolicySatisfier.java | 27 +---
 .../TestPersistentStoragePolicySatisfier.java   |  2 +-
 2 files changed, 19 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5780f062/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a28a806..cbfba44 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -242,12 +242,25 @@ public class StoragePolicySatisfier implements Runnable {
   ItemInfo itemInfo = storageMovementNeeded.get();
   if (itemInfo != null) {
 long trackId = itemInfo.getTrackId();
-BlockCollection blockCollection =
-namesystem.getBlockCollection(trackId);
-// Check blockCollectionId existence.
+BlockCollection blockCollection;
+BlocksMovingAnalysis status = null;
+try {
+  namesystem.readLock();
+  blockCollection = namesystem.getBlockCollection(trackId);
+  // Check blockCollectionId existence.
+  if (blockCollection == null) {
+// File doesn't exists (maybe got deleted), remove trackId from
+// the queue
+storageMovementNeeded.removeItemTrackInfo(itemInfo);
+  } else {
+status =
+analyseBlocksStorageMovementsAndAssignToDN(
+blockCollection);
+  }
+} finally {
+  namesystem.readUnlock();
+}
 if (blockCollection != null) {
-  BlocksMovingAnalysis status =
-  analyseBlocksStorageMovementsAndAssignToDN(blockCollection);
   switch (status.status) {
   // Just add to monitor, so it will be retried after timeout
   case ANALYSIS_SKIPPED_FOR_RETRY:
@@ -283,10 +296,6 @@ public class StoragePolicySatisfier implements Runnable {
 storageMovementNeeded.removeItemTrackInfo(itemInfo);
 break;
   }
-} else {
-  // File doesn't exists (maybe got deleted), remove trackId from
-  // the queue
-  storageMovementNeeded.removeItemTrackInfo(itemInfo);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5780f062/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 5bce296..7165d06 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -72,7 +72,7 @@ public class TestPersistentStoragePolicySatisfier {
   {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD}
   };
 
-  private final int timeout = 300000;
+  private final int timeout = 90000;
 
   /**
* Setup environment for every test case.





[22/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by 
Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78420719
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78420719
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78420719

Branch: refs/heads/trunk
Commit: 78420719eb1f138c6f10558befb7bc8ebcc28a54
Parents: c561cb3
Author: Uma Maheswara Rao G 
Authored: Fri Dec 22 09:10:12 2017 -0800
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:02 2018 -0700

--
 .../server/blockmanagement/BlockManager.java|6 +-
 .../BlockStorageMovementAttemptedItems.java |  241 ---
 .../namenode/BlockStorageMovementNeeded.java|  574 --
 .../hdfs/server/namenode/FSNamesystem.java  |1 +
 .../hdfs/server/namenode/IntraNNSPSContext.java |   41 +
 .../server/namenode/StoragePolicySatisfier.java |  973 --
 .../sps/BlockStorageMovementAttemptedItems.java |  241 +++
 .../sps/BlockStorageMovementNeeded.java |  572 ++
 .../namenode/sps/StoragePolicySatisfier.java|  988 ++
 .../hdfs/server/namenode/sps/package-info.java  |   28 +
 .../TestBlockStorageMovementAttemptedItems.java |  196 --
 .../namenode/TestStoragePolicySatisfier.java| 1775 -
 ...stStoragePolicySatisfierWithStripedFile.java |  580 --
 .../TestBlockStorageMovementAttemptedItems.java |  196 ++
 .../sps/TestStoragePolicySatisfier.java | 1779 ++
 ...stStoragePolicySatisfierWithStripedFile.java |  580 ++
 16 files changed, 4430 insertions(+), 4341 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0957fe2..ec99a9f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,11 +89,12 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.IntraNNSPSContext;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -478,7 +479,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-sps = new StoragePolicySatisfier(namesystem, this, conf);
+StoragePolicySatisfier.Context spsctxt = new IntraNNSPSContext(namesystem);
+sps = new StoragePolicySatisfier(namesystem, this, conf, spsctxt);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
deleted file mode 100644
index 643255f..0000000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this 
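
Beyond moving files into the new sps package, the patch introduces the Context seam visible in the BlockManager hunk above: the satisfier is now constructed against StoragePolicySatisfier.Context rather than FSNamesystem directly. An illustrative-only miniature of why that matters; the names below are stand-ins, not Hadoop's:

final class SatisfierSketch {
  interface Context { // plays the role of StoragePolicySatisfier.Context
    boolean isInSafeMode();
  }

  private final Context ctx;

  SatisfierSketch(Context ctx) {
    this.ctx = ctx;
  }

  void runOneCycle() {
    if (ctx.isInSafeMode()) {
      return; // skip scheduling while the NameNode is in safe mode
    }
    // ... analyse blocks and schedule movements ...
  }
}

Because the dependency is an interface, a later patch can hand the same satisfier an implementation that answers over RPC, which is the direction the external-SPS work in this series takes.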

[17/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
new file mode 100644
index 0000000..c1a2b8b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -0,0 +1,580 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that StoragePolicySatisfier daemon is able to check the striped blocks
+ * to be moved and finding its expected target locations in order to satisfy 
the
+ * storage policy.
+ */
+public class TestStoragePolicySatisfierWithStripedFile {
+
+  private static final Logger LOG = LoggerFactory
+  .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
+
+  private final int stripesPerBlock = 2;
+
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocks;
+  private int cellSize;
+  private int defaultStripeBlockSize;
+
+  private ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
+  /**
+   * Initialize erasure coding policy.
+   */
+  @Before
+  public void init(){
+ecPolicy = getEcPolicy();
+dataBlocks = ecPolicy.getNumDataUnits();
+parityBlocks = ecPolicy.getNumParityUnits();
+cellSize = ecPolicy.getCellSize();
+defaultStripeBlockSize = cellSize * stripesPerBlock;
+  }
+
+  /**
+   * Tests to verify that all the striped blocks(data + parity blocks) are
+   * moving to satisfy the storage policy.
+   */
+  @Test(timeout = 300000)
+  public void testMoverWithFullStripe() throws Exception {
+// start 10 datanodes
+int numOfDatanodes = 10;
+int storagesPerDatanode = 2;
+long capacity = 20 * defaultStripeBlockSize;
+long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+for (int i = 0; i < numOfDatanodes; i++) {
+  for (int j = 0; j < storagesPerDatanode; j++) {
+capacities[i][j] = capacity;
+  }
+}
+
+final Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+true);
+initConfWithStripe(conf, defaultStripeBlockSize);
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(numOfDatanodes)
+

[01/50] [abbrv] hadoop git commit: HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the edits log. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/trunk a2a8c4869 -> 3ac07b720


HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the 
edits log. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ce332dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ce332dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ce332dc

Branch: refs/heads/trunk
Commit: 5ce332dc9a072f8850ab71ba16898faf8e866c06
Parents: 6fe6c54
Author: Uma Maheswara Rao G 
Authored: Mon May 22 21:39:43 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:05:59 2018 -0700

--
 .../hdfs/server/namenode/FSDirAttrOp.java   |  91 
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 145 +++
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirectory.java   |  16 --
 .../hdfs/server/namenode/FSNamesystem.java  |  24 ++-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  10 ++
 .../server/namenode/StoragePolicySatisfier.java |   4 +-
 .../TestPersistentStoragePolicySatisfier.java   |  90 +++-
 .../namenode/TestStoragePolicySatisfier.java|   5 +-
 9 files changed, 268 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ce332dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 0df58bf..1dbee96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -43,14 +42,12 @@ import com.google.common.collect.Lists;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 
 public class FSDirAttrOp {
   static FileStatus setPermission(
@@ -193,29 +190,6 @@ public class FSDirAttrOp {
 return fsd.getAuditFileInfo(iip);
   }
 
-  static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
-  String src, boolean logRetryCache) throws IOException {
-
-FSPermissionChecker pc = fsd.getPermissionChecker();
-List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
-INodesInPath iip;
-fsd.writeLock();
-try {
-
-  // check operation permission.
-  iip = fsd.resolvePath(pc, src, DirOp.WRITE);
-  if (fsd.isPermissionEnabled()) {
-fsd.checkPathAccess(pc, iip, FsAction.WRITE);
-  }
-  XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-} finally {
-  fsd.writeUnlock();
-}
-fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-return fsd.getAuditFileInfo(iip);
-  }
-
   static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
   throws IOException {
 return bm.getStoragePolicies();
@@ -477,71 +451,6 @@ public class FSDirAttrOp {
 }
   }
 
-  static XAttr unprotectedSatisfyStoragePolicy(INodesInPath iip,
-  BlockManager bm, FSDirectory fsd) throws IOException {
-
-final INode inode = FSDirectory.resolveLastINode(iip);
-final int snapshotId = iip.getLatestSnapshotId();
-final List<INode> candidateNodes = new ArrayList<>();
-
-// TODO: think about optimization here, label the dir instead
-// of the sub-files of the dir.
-if (inode.isFile()) {
-  candidateNodes.add(inode);
-} else if (inode.isDirectory()) {
-  for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
-  candidateNodes.add(node);
-}
-  }
-}
-
-// If node has satisfy xattr, then stop adding it
-// to satisfy 
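
The removed FSDirAttrOp code above shows the persistence flow that the new FSDirSatisfyStoragePolicyOp takes over: record the request as an xattr under the directory write lock, then write it to the edit log, so replaying the edits after a NameNode restart re-queues the pending satisfy work. Condensed from the deleted method, with pc, src, bm and logRetryCache as declared there:

List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
fsd.writeLock();
try {
  INodesInPath iip = fsd.resolvePath(pc, src, DirOp.WRITE);
  XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
  xAttrs.add(satisfyXAttr);
} finally {
  fsd.writeUnlock();
}
fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache); // survives restart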

[16/50] [abbrv] hadoop git commit: HDFS-12790: [SPS]: Rebasing HDFS-10285 branch after HDFS-10467, HDFS-12599 and HDFS-11968 commits. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12790: [SPS]: Rebasing HDFS-10285 branch after HDFS-10467, HDFS-12599 and 
HDFS-11968 commits. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9b83f94f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9b83f94f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9b83f94f

Branch: refs/heads/trunk
Commit: 9b83f94f35eb8cd20d9f3e0cbbeecb6ffb5b
Parents: 68017e3
Author: Rakesh Radhakrishnan 
Authored: Fri Nov 10 10:06:43 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:02 2018 -0700

--
 .../federation/router/RouterRpcServer.java  |  19 +++
 .../namenode/TestStoragePolicySatisfier.java|   9 +-
 ...stStoragePolicySatisfierWithStripedFile.java |  21 +--
 .../hdfs/tools/TestStoragePolicyCommands.java   |  57 -
 .../TestStoragePolicySatisfyAdminCommands.java  | 127 +++
 5 files changed, 162 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b83f94f/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 027db8a..c5458f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
@@ -2490,4 +2491,22 @@ public class RouterRpcServer extends AbstractService
   public FederationRPCMetrics getRPCMetrics() {
 return this.rpcMonitor.getRPCMetrics();
   }
+
+  @Override
+  public void satisfyStoragePolicy(String path) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public boolean isStoragePolicySatisfierRunning() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return false;
+  }
+
+  @Override
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+  String path) throws IOException {
+checkOperation(OperationCategory.READ, false);
+return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9b83f94f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index f42d911..edd1aca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -61,6 +61,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Assert;
@@ -912,8 +913,6 @@ public class TestStoragePolicySatisfier {
 
 int defaultStripedBlockSize =
 StripedFileTestUtil.getDefaultECPolicy().getCellSize() * 4;
-config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-StripedFileTestUtil.getDefaultECPolicy().getName());
 config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
 config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
 

[25/50] [abbrv] hadoop git commit: HDFS-12106: [SPS]: Improve storage policy satisfier configurations. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-12106: [SPS]: Improve storage policy satisfier configurations. Contributed 
by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c561cb31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c561cb31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c561cb31

Branch: refs/heads/trunk
Commit: c561cb316e365ef674784cd6cf0b12c0fbc271a3
Parents: 9b83f94
Author: Surendra Singh Lilhore 
Authored: Wed Nov 15 20:22:27 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:02 2018 -0700

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  6 +++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  4 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  3 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 10 ++--
 .../server/blockmanagement/DatanodeManager.java | 12 ++---
 .../datanode/StoragePolicySatisfyWorker.java|  3 +-
 .../BlockStorageMovementAttemptedItems.java |  8 +--
 .../namenode/BlockStorageMovementNeeded.java| 46 
 .../hdfs/server/namenode/FSNamesystem.java  |  3 ++
 .../server/namenode/StoragePolicySatisfier.java | 42 ---
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   | 27 +++---
 .../src/main/resources/hdfs-default.xml | 17 --
 .../src/site/markdown/ArchivalStorage.md|  2 +-
 .../TestBlockStorageMovementAttemptedItems.java | 10 ++--
 .../namenode/TestStoragePolicySatisfier.java| 57 ++--
 15 files changed, 199 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c561cb31/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 190a1c6..aabcdd9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -150,6 +150,12 @@ public final class HdfsConstants {
 SUCCESS,
 
 /**
+ * Few blocks failed to move and the path is still not
+ * fully satisfied the storage policy.
+ */
+FAILURE,
+
+/**
  * Status not available.
  */
 NOT_AVAILABLE

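The FAILURE state above closes a gap in the status protocol: SUCCESS means the path fully satisfies its policy, while FAILURE means the satisfier finished but a few blocks could not be moved. A minimal polling sketch against DFSClient#checkStoragePolicySatisfyPathStatus as the API stands at this point in the branch (the later HDFS-13076 cleanup removes it); the timeout and sleep values are illustrative:

import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;

public class SpsStatusPoller {
  /** Returns true on SUCCESS; false on FAILURE or timeout. */
  static boolean waitForSatisfy(DFSClient client, String path, long timeoutMs)
      throws Exception {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      StoragePolicySatisfyPathStatus status =
          client.checkStoragePolicySatisfyPathStatus(path);
      switch (status) {
      case SUCCESS:
        return true;
      case FAILURE:
        return false; // a few blocks failed to move; caller may re-submit
      default:
        Thread.sleep(1000); // PENDING, IN_PROGRESS or NOT_AVAILABLE
      }
    }
    return false;
  }
}
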
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c561cb31/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 9281bff..7770e31 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -3409,6 +3409,8 @@ public class PBHelperClient {
   return StoragePolicySatisfyPathStatus.IN_PROGRESS;
 case SUCCESS:
   return StoragePolicySatisfyPathStatus.SUCCESS;
+case FAILURE:
+  return StoragePolicySatisfyPathStatus.FAILURE;
 case NOT_AVAILABLE:
   return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
 default:
@@ -3425,6 +3427,8 @@ public class PBHelperClient {
   return HdfsConstants.StoragePolicySatisfyPathStatus.IN_PROGRESS;
 case SUCCESS:
   return HdfsConstants.StoragePolicySatisfyPathStatus.SUCCESS;
+case FAILURE:
+  return HdfsConstants.StoragePolicySatisfyPathStatus.FAILURE;
 case NOT_AVAILABLE:
   return HdfsConstants.StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
 default:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c561cb31/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 1de13ca..933a19a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -854,7 +854,8 @@ message CheckStoragePolicySatisfyPathStatusResponseProto {
 PENDING = 0;
 IN_PROGRESS = 1;
 SUCCESS = 2;
-NOT_AVAILABLE = 3;
+FAILURE = 3;
+NOT_AVAILABLE = 4;

[05/50] [abbrv] hadoop git commit: HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running together. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running 
together. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5eb24ef7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5eb24ef7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5eb24ef7

Branch: refs/heads/trunk
Commit: 5eb24ef7e7b8fb61a5f5b88bae3596b30aaeb60b
Parents: 0b360b1
Author: Uma Maheswara Rao G 
Authored: Wed Jul 12 17:56:56 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../server/namenode/StoragePolicySatisfier.java | 53 +++-
 .../namenode/TestStoragePolicySatisfier.java|  3 +-
 ...stStoragePolicySatisfierWithStripedFile.java |  5 +-
 3 files changed, 34 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5eb24ef7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 97cbf1b..00b4cd0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -128,6 +128,14 @@ public class StoragePolicySatisfier implements Runnable {
*/
   public synchronized void start(boolean reconfigStart) {
 isRunning = true;
+if (checkIfMoverRunning()) {
+  isRunning = false;
+  LOG.error(
+  "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
+  + HdfsServerConstants.MOVER_ID_PATH.toString()
+  + " been opened. Maybe a Mover instance is running!");
+  return;
+}
 if (reconfigStart) {
   LOG.info("Starting StoragePolicySatisfier, as admin requested to "
   + "activate it.");
@@ -211,20 +219,6 @@ public class StoragePolicySatisfier implements Runnable {
 
   @Override
   public void run() {
-boolean isMoverRunning = !checkIfMoverRunning();
-synchronized (this) {
-  isRunning = isMoverRunning;
-  if (!isRunning) {
-// Stopping monitor thread and clearing queues as well
-this.clearQueues();
-this.storageMovementsMonitor.stopGracefully();
-LOG.error(
-"Stopping StoragePolicySatisfier thread " + "as Mover ID file "
-+ HdfsServerConstants.MOVER_ID_PATH.toString()
-+ " been opened. Maybe a Mover instance is running!");
-return;
-  }
-}
 while (namesystem.isRunning() && isRunning) {
   try {
 if (!namesystem.isInSafeMode()) {
@@ -274,25 +268,34 @@ public class StoragePolicySatisfier implements Runnable {
 // we want to check block movements.
 Thread.sleep(3000);
   } catch (Throwable t) {
-synchronized (this) {
+handleException(t);
+  }
+}
+  }
+
+  private void handleException(Throwable t) {
+// double check to avoid entering into synchronized block.
+if (isRunning) {
+  synchronized (this) {
+if (isRunning) {
   isRunning = false;
   // Stopping monitor thread and clearing queues as well
   this.clearQueues();
   this.storageMovementsMonitor.stopGracefully();
-}
-if (!namesystem.isRunning()) {
-  LOG.info("Stopping StoragePolicySatisfier.");
-  if (!(t instanceof InterruptedException)) {
-LOG.info("StoragePolicySatisfier received an exception"
-+ " while shutting down.", t);
+  if (!namesystem.isRunning()) {
+LOG.info("Stopping StoragePolicySatisfier.");
+if (!(t instanceof InterruptedException)) {
+  LOG.info("StoragePolicySatisfier received an exception"
+  + " while shutting down.", t);
+}
+return;
   }
-  break;
 }
-LOG.error("StoragePolicySatisfier thread received runtime exception. "
-+ "Stopping Storage policy satisfier work", t);
-break;
   }
 }
+LOG.error("StoragePolicySatisfier thread received runtime exception. "
++ "Stopping Storage policy satisfier work", t);
+return;
   }
 
   private BlocksMovingAnalysisStatus 
analyseBlocksStorageMovementsAndAssignToDN(

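The refactor above extracts the stop-on-error path into handleException() and wraps it in a double-checked guard: the unsynchronized isRunning test skips lock acquisition on the common path, and the re-check inside synchronized guarantees clearQueues() and stopGracefully() run at most once even if several threads hit errors concurrently. A standalone sketch of the same idiom with illustrative names (the sketch uses a volatile flag; the SPS code relies on its own synchronization instead):

public class OnceStopper {
  private volatile boolean running = true;

  public void handleFailure(Throwable t) {
    // Double check to avoid entering the synchronized block when the
    // worker has already been stopped.
    if (running) {
      synchronized (this) {
        if (running) {
          running = false;
          cleanup(); // runs at most once, no matter how many callers race
        }
      }
    }
    System.err.println("Stopping worker after: " + t);
  }

  private void cleanup() {
    // stop monitor threads, clear queues, release resources
  }
}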

[26/50] [abbrv] hadoop git commit: HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. Contributed by Uma Maheswara Rao G.

2018-08-12 Thread umamahesh
HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. 
Contributed by Uma Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3159b39c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3159b39c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3159b39c

Branch: refs/heads/trunk
Commit: 3159b39cf8ef704835325263154fb1a1cecc109d
Parents: 8d4f74e
Author: Rakesh Radhakrishnan 
Authored: Tue Jan 23 20:09:26 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:03 2018 -0700

--
 .../sps/BlockStorageMovementNeeded.java |  70 +++-
 .../hdfs/server/namenode/sps/Context.java   |   8 +
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   2 +
 .../namenode/sps/IntraSPSNameNodeContext.java   |   7 +
 .../sps/IntraSPSNameNodeFileIdCollector.java|   6 +-
 .../hdfs/server/namenode/sps/SPSService.java|  10 +-
 .../namenode/sps/StoragePolicySatisfier.java|   8 +-
 .../server/sps/ExternalSPSFileIDCollector.java  | 156 +
 .../hadoop/hdfs/server/sps/package-info.java|  28 ++
 .../sps/TestStoragePolicySatisfier.java | 323 ++-
 .../sps/TestExternalStoragePolicySatisfier.java | 108 +++
 11 files changed, 556 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159b39c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index 39a0051..b141502 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -97,23 +97,53 @@ public class BlockStorageMovementNeeded {
   }
 
   /**
-   * Add the itemInfo to tracking list for which storage movement
-   * expected if necessary.
+   * Add the itemInfo list to tracking list for which storage movement expected
+   * if necessary.
+   *
* @param startId
-   *- start id
+   *  - start id
* @param itemInfoList
-   *- List of child in the directory
+   *  - List of child in the directory
+   * @param scanCompleted
+   *  -Indicates whether the start id directory has no more elements to
+   *  scan.
*/
   @VisibleForTesting
-  public synchronized void addAll(long startId,
-  List itemInfoList, boolean scanCompleted) {
+  public synchronized void addAll(long startId, List itemInfoList,
+  boolean scanCompleted) {
 storageMovementNeeded.addAll(itemInfoList);
+updatePendingDirScanStats(startId, itemInfoList.size(), scanCompleted);
+  }
+
+  /**
+   * Add the itemInfo to tracking list for which storage movement expected if
+   * necessary.
+   *
+   * @param itemInfoList
+   *  - List of child in the directory
+   * @param scanCompleted
+   *  -Indicates whether the ItemInfo start id directory has no more
+   *  elements to scan.
+   */
+  @VisibleForTesting
+  public synchronized void add(ItemInfo itemInfo, boolean scanCompleted) {
+storageMovementNeeded.add(itemInfo);
+// This represents sps start id is file, so no need to update pending dir
+// stats.
+if (itemInfo.getStartId() == itemInfo.getFileId()) {
+  return;
+}
+updatePendingDirScanStats(itemInfo.getStartId(), 1, scanCompleted);
+  }
+
+  private void updatePendingDirScanStats(long startId, int numScannedFiles,
+  boolean scanCompleted) {
 DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
 if (pendingWork == null) {
   pendingWork = new DirPendingWorkInfo();
   pendingWorkForDirectory.put(startId, pendingWork);
 }
-pendingWork.addPendingWorkCount(itemInfoList.size());
+pendingWork.addPendingWorkCount(numScannedFiles);
 if (scanCompleted) {
   pendingWork.markScanCompleted();
 }
@@ -250,13 +280,15 @@ public class BlockStorageMovementNeeded {
 
 @Override
 public void run() {
-  LOG.info("Starting FileInodeIdCollector!.");
+  LOG.info("Starting SPSPathIdProcessor!.");
   long lastStatusCleanTime = 0;
+  Long startINodeId = null;
   while (ctxt.isRunning()) {
-LOG.info("Running FileInodeIdCollector!.");
 try {
   if (!ctxt.isInSafeMode()) {
-Long startINodeId = ctxt.getNextSPSPathId();
+if 

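The add()/addAll() pair above gives scanners two entry points into the movement-needed queue: batch submission for a directory's children, and single-item submission that touches the per-directory stats only when the item belongs to a directory scan (startId != fileId). A hedged sketch of how a collector might drive the single-item path; the queue instance and the (startId, fileId) pairs are assumed to come from the surrounding SPS machinery:

import java.util.List;
import org.apache.hadoop.hdfs.server.namenode.sps.BlockStorageMovementNeeded;
import org.apache.hadoop.hdfs.server.namenode.sps.ItemInfo;

class ScannerSketch {
  /** Feed one fully scanned directory (startId) into the queue. */
  static void submitDirectory(BlockStorageMovementNeeded queue, long startId,
      List<Long> fileIds) {
    for (int i = 0; i < fileIds.size(); i++) {
      boolean lastChild = (i == fileIds.size() - 1);
      // Pass scanCompleted=true only with the final child so the pending
      // work stats for startId are marked scan-complete exactly once.
      queue.add(new ItemInfo(startId, fileIds.get(i)), lastChild);
    }
  }
}
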
[33/50] [abbrv] hadoop git commit: HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by Surendra Singh.

2018-08-12 Thread umamahesh
HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by 
Surendra Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4402f3f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4402f3f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4402f3f8

Branch: refs/heads/trunk
Commit: 4402f3f8557527d5c6cdad6f5bdcbd707b8cbf52
Parents: d3de4fb
Author: Uma Maheswara Rao G 
Authored: Wed Feb 7 02:28:23 2018 -0800
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:04 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   4 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|   6 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  14 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |   8 +-
 .../federation/router/RouterRpcServer.java  |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  61 ---
 ...tNamenodeProtocolServerSideTranslatorPB.java |  16 +-
 .../server/blockmanagement/BlockManager.java| 255 +---
 .../blockmanagement/DatanodeDescriptor.java |  33 +-
 .../hdfs/server/common/HdfsServerConstants.java |   2 +-
 .../datanode/StoragePolicySatisfyWorker.java|  15 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  26 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   1 -
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  46 +--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  30 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  21 +-
 .../sps/BlockStorageMovementNeeded.java |   4 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |   6 +-
 .../hdfs/server/namenode/sps/SPSPathIds.java|  70 
 .../hdfs/server/namenode/sps/SPSService.java|  10 +-
 .../namenode/sps/StoragePolicySatisfier.java| 137 ---
 .../sps/StoragePolicySatisfyManager.java| 399 +++
 .../sps/ExternalStoragePolicySatisfier.java |   2 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   2 +-
 .../namenode/TestNameNodeReconfigure.java   |  19 +-
 .../TestPersistentStoragePolicySatisfier.java   |   3 +-
 .../TestStoragePolicySatisfierWithHA.java   |   6 +-
 .../sps/TestStoragePolicySatisfier.java |  35 +-
 ...stStoragePolicySatisfierWithStripedFile.java |   6 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  24 +-
 33 files changed, 665 insertions(+), 604 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4402f3f8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 471ab2c..b6f9bdd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3110,8 +3110,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public boolean isStoragePolicySatisfierRunning() throws IOException {
-return namenode.isStoragePolicySatisfierRunning();
+  public boolean isInternalSatisfierRunning() throws IOException {
+return namenode.isInternalSatisfierRunning();
   }
 
   Tracer getTracer() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4402f3f8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 360fd63..5c51c22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1759,12 +1759,12 @@ public interface ClientProtocol {
   void satisfyStoragePolicy(String path) throws IOException;
 
   /**
-   * Check if StoragePolicySatisfier is running.
-   * @return true if StoragePolicySatisfier is running
+   * Check if internal StoragePolicySatisfier is running.
+   * @return true if internal StoragePolicySatisfier is running
* @throws IOException
*/
   @Idempotent
-  boolean isStoragePolicySatisfierRunning() throws IOException;
+  boolean isInternalSatisfierRunning() throws IOException;

[41/50] [abbrv] hadoop git commit: HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by external satisfier. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by 
external satisfier. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8467ec24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8467ec24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8467ec24

Branch: refs/heads/trunk
Commit: 8467ec24fb74f30371d5a13e893fc56309ee9372
Parents: 4402f3f
Author: Rakesh Radhakrishnan 
Authored: Fri Feb 16 17:01:38 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:05 2018 -0700

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  46 +
 .../NamenodeProtocolTranslatorPB.java   |  42 +
 .../hdfs/server/namenode/FSTreeTraverser.java   |   2 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  32 +---
 .../server/namenode/ReencryptionHandler.java|   2 +-
 .../sps/BlockStorageMovementAttemptedItems.java |  42 +++--
 .../sps/BlockStorageMovementNeeded.java | 119 +++--
 .../hdfs/server/namenode/sps/Context.java   |  55 +++---
 .../hdfs/server/namenode/sps/FileCollector.java |  48 +
 .../server/namenode/sps/FileIdCollector.java|  43 -
 .../namenode/sps/IntraSPSNameNodeContext.java   |  39 ++---
 .../sps/IntraSPSNameNodeFileIdCollector.java|  23 +--
 .../hdfs/server/namenode/sps/ItemInfo.java  |  39 +++--
 .../hdfs/server/namenode/sps/SPSService.java|  32 ++--
 .../namenode/sps/StoragePolicySatisfier.java| 129 +-
 .../sps/StoragePolicySatisfyManager.java|   6 +-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  24 +--
 .../sps/ExternalSPSBlockMoveTaskHandler.java|   4 +-
 .../hdfs/server/sps/ExternalSPSContext.java |  60 +++
 .../server/sps/ExternalSPSFileIDCollector.java  | 174 ---
 .../sps/ExternalSPSFilePathCollector.java   | 172 ++
 .../sps/ExternalStoragePolicySatisfier.java |   7 +-
 .../src/main/proto/NamenodeProtocol.proto   |  27 +--
 .../TestBlockStorageMovementAttemptedItems.java |  27 ++-
 .../sps/TestStoragePolicySatisfier.java |  52 +++---
 ...stStoragePolicySatisfierWithStripedFile.java |  15 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 148 +++-
 27 files changed, 701 insertions(+), 708 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8467ec24/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index 25eafdf..ed176cc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -35,16 +35,12 @@ import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksReq
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksResponseProto;
 import 

[50/50] [abbrv] hadoop git commit: HDFS-13808: [SPS]: Remove unwanted FSNamesystem #isFileOpenedForWrite() and #getFileInfo() function. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13808: [SPS]: Remove unwanted FSNamesystem #isFileOpenedForWrite() and 
#getFileInfo() function. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ac07b72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ac07b72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ac07b72

Branch: refs/heads/trunk
Commit: 3ac07b720b7839a7fe6c83f4ccfe319b6a892501
Parents: 39ed3a6
Author: Uma Maheswara Rao Gangumalla 
Authored: Sat Aug 11 23:22:59 2018 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:07 2018 -0700

--
 .../router/RouterNamenodeProtocol.java  |  1 +
 .../server/blockmanagement/BlockManager.java| 34 
 .../blockmanagement/DatanodeDescriptor.java |  2 +-
 .../server/blockmanagement/DatanodeManager.java | 17 --
 .../hdfs/server/datanode/BPServiceActor.java| 16 --
 .../hdfs/server/namenode/FSNamesystem.java  | 38 -
 .../hadoop/hdfs/server/namenode/Namesystem.java | 22 
 .../sps/BlockStorageMovementNeeded.java | 18 ++-
 .../hdfs/server/namenode/sps/Context.java   | 28 --
 .../hdfs/server/namenode/sps/SPSService.java|  5 +-
 .../namenode/sps/StoragePolicySatisfier.java| 19 +++
 .../hdfs/server/sps/ExternalSPSContext.java | 57 +---
 .../sps/ExternalStoragePolicySatisfier.java |  2 +-
 .../src/site/markdown/ArchivalStorage.md|  2 +-
 .../TestPersistentStoragePolicySatisfier.java   | 10 +++-
 ...stStoragePolicySatisfierWithStripedFile.java |  2 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  4 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |  2 +-
 18 files changed, 39 insertions(+), 240 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
index edfb391..bf0db6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
@@ -187,6 +187,7 @@ public class RouterNamenodeProtocol implements 
NamenodeProtocol {
 
   @Override
   public Long getNextSPSPath() throws IOException {
+rpcServer.checkOperation(OperationCategory.READ, false);
 // not supported
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 87bd155..d8a3aa3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4300,21 +4300,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Check file has low redundancy blocks.
-   */
-  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
-boolean result = false;
-for (BlockInfo block : bc.getBlocks()) {
-  short expected = getExpectedRedundancyNum(block);
-  final NumberReplicas n = countNodes(block);
-  if (expected > n.liveReplicas()) {
-result = true;
-  }
-}
-return result;
-  }
-
-  /**
* Check sufficient redundancy of the blocks in the collection. If any block
* is needed reconstruction, insert it into the reconstruction queue.
* Otherwise, if the block is more than the expected replication factor,
@@ -5011,25 +4996,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Check whether file id has low redundancy blocks.
-   *
-   * @param inodeID
-   *  - inode id
-   */
-  public boolean hasLowRedundancyBlocks(long inodeID) {
-namesystem.readLock();
-try {
-  BlockCollection bc = namesystem.getBlockCollection(inodeID);
-  if (bc == null) {
-return false;
-  }
-  return hasLowRedundancyBlocks(bc);
-} finally {
-  namesystem.readUnlock();
-}
-  }

[08/50] [abbrv] hadoop git commit: HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks before removing the xattr. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks 
before removing the xattr. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b360b16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b360b16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b360b16

Branch: refs/heads/trunk
Commit: 0b360b16ab8759e3db606ada3420f4e2f56235f3
Parents: 00cf207
Author: Uma Maheswara Rao G 
Authored: Mon Jul 10 18:00:58 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../server/blockmanagement/BlockManager.java|  15 +++
 .../server/namenode/StoragePolicySatisfier.java |  20 +++-
 .../namenode/TestStoragePolicySatisfier.java| 102 ++-
 ...stStoragePolicySatisfierWithStripedFile.java |  90 
 4 files changed, 224 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b360b16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3385af6..988067c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4324,6 +4324,21 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
+   * Check file has low redundancy blocks.
+   */
+  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
+boolean result = false;
+for (BlockInfo block : bc.getBlocks()) {
+  short expected = getExpectedRedundancyNum(block);
+  final NumberReplicas n = countNodes(block);
+  if (expected > n.liveReplicas()) {
+result = true;
+  }
+}
+return result;
+  }
+
+  /**
* Check sufficient redundancy of the blocks in the collection. If any block
* is needed reconstruction, insert it into the reconstruction queue.
* Otherwise, if the block is more than the expected replication factor,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b360b16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 1b2afa3..97cbf1b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -99,7 +99,10 @@ public class StoragePolicySatisfier implements Runnable {
 // Represents that, the analysis skipped due to some conditions.
 // Example conditions are if no blocks really exists in block collection or
 // if analysis is not required on ec files with unsuitable storage policies
-BLOCKS_TARGET_PAIRING_SKIPPED;
+BLOCKS_TARGET_PAIRING_SKIPPED,
+// Represents that, All the reported blocks are satisfied the policy but
+// some of the blocks are low redundant.
+FEW_LOW_REDUNDANCY_BLOCKS
   }
 
   public StoragePolicySatisfier(final Namesystem namesystem,
@@ -247,6 +250,14 @@ public class StoragePolicySatisfier implements Runnable {
   case FEW_BLOCKS_TARGETS_PAIRED:
 this.storageMovementsMonitor.add(blockCollectionID, false);
 break;
+  case FEW_LOW_REDUNDANCY_BLOCKS:
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Adding trackID " + blockCollectionID
+  + " back to retry queue as some of the blocks"
+  + " are low redundant.");
+}
+this.storageMovementNeeded.add(blockCollectionID);
+break;
   // Just clean Xattrs
   case BLOCKS_TARGET_PAIRING_SKIPPED:
   case BLOCKS_ALREADY_SATISFIED:
@@ -347,11 +358,16 @@ public class StoragePolicySatisfier implements Runnable {
 boolean computeStatus = computeBlockMovingInfos(blockMovingInfos,
 blockInfo, expectedStorageTypes, existing, storages);
 if (computeStatus
-&& status != 

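The new FEW_LOW_REDUNDANCY_BLOCKS status above changes the xattr lifecycle: instead of treating an under-replicated file as satisfied (and dropping its satisfy xattr), the satisfier puts the trackID back on the retry queue so the policy gets another chance once reconstruction restores the replicas. The underlying test in hasLowRedundancyBlocks() is simply "any block with fewer live replicas than expected"; a self-contained sketch of that check over plain counts, without the BlockManager types:

class RedundancyCheck {
  /** expected[i] and live[i] are the replica counts of block i. */
  static boolean hasLowRedundancy(int[] expected, int[] live) {
    for (int i = 0; i < expected.length; i++) {
      if (expected[i] > live[i]) {
        return true; // at least one block is under-replicated
      }
    }
    return false;
  }
}
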
[40/50] [abbrv] hadoop git commit: HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by external satisfier. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8467ec24/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
deleted file mode 100644
index ff277ba..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
+++ /dev/null
@@ -1,174 +0,0 @@
-package org.apache.hadoop.hdfs.server.sps;
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.sps.Context;
-import org.apache.hadoop.hdfs.server.namenode.sps.FileIdCollector;
-import org.apache.hadoop.hdfs.server.namenode.sps.ItemInfo;
-import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is to scan the paths recursively. If file is directory, then it
- * will scan for files recursively. If the file is non directory, then it will
- * just submit the same file to process.
- */
-@InterfaceAudience.Private
-public class ExternalSPSFileIDCollector implements FileIdCollector {
-  public static final Logger LOG =
-  LoggerFactory.getLogger(ExternalSPSFileIDCollector.class);
-  private Context cxt;
-  private DistributedFileSystem dfs;
-  private SPSService service;
-  private int maxQueueLimitToScan;
-
-  public ExternalSPSFileIDCollector(Context cxt, SPSService service) {
-this.cxt = cxt;
-this.service = service;
-this.maxQueueLimitToScan = service.getConf().getInt(
-DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY,
-DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_DEFAULT);
-try {
-  // TODO: probably we could get this dfs from external context? but this 
is
-  // too specific to external.
-  dfs = getFS(service.getConf());
-} catch (IOException e) {
-  LOG.error("Unable to get the filesystem. Make sure Namenode running and "
-  + "configured namenode address is correct.", e);
-}
-  }
-
-  private DistributedFileSystem getFS(Configuration conf) throws IOException {
-return (DistributedFileSystem) FileSystem
-.get(FileSystem.getDefaultUri(conf), conf);
-  }
-
-  /**
-   * Recursively scan the given path and add the file info to SPS service for
-   * processing.
-   */
-  private long processPath(long startID, String fullPath) {
-long pendingWorkCount = 0; // to be satisfied file counter
-for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;;) {
-  final DirectoryListing children;
-  try {
-children = dfs.getClient().listPaths(fullPath, lastReturnedName, 
false);
-  } catch (IOException e) {
-LOG.warn("Failed to list directory " + fullPath
-+ ". Ignore the directory and continue.", e);
-return pendingWorkCount;
-  }
-  if (children == null) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("The scanning start dir/sub dir " + fullPath
-  + " does not have childrens.");
-}
-return pendingWorkCount;
-  }
-
-  for (HdfsFileStatus child : children.getPartialListing()) {
-if (child.isFile()) {
-  service.addFileIdToProcess(new ItemInfo(startID, child.getFileId()),
-  false);
-  checkProcessingQueuesFree();
-  pendingWorkCount++; // increment to be satisfied file count
-} else {
-  String fullPathStr = child.getFullName(fullPath);
- 

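processPath() above walks a directory one listing page at a time: listPaths(path, lastReturnedName) returns a partial listing plus a resume token, so arbitrarily large directories never need to fit into a single RPC response. A minimal sketch of the same pagination pattern against a DistributedFileSystem; error handling and the queue-throttling of the original are omitted:

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

class PagedWalk {
  static void walk(DistributedFileSystem dfs, String dir) throws IOException {
    byte[] last = HdfsFileStatus.EMPTY_NAME;
    while (true) {
      DirectoryListing page = dfs.getClient().listPaths(dir, last, false);
      if (page == null) {
        return; // directory is empty or vanished
      }
      for (HdfsFileStatus child : page.getPartialListing()) {
        if (child.isFile()) {
          System.out.println(child.getFullName(dir));
        } else {
          walk(dfs, child.getFullName(dir)); // recurse into subdirectory
        }
      }
      if (!page.hasMore()) {
        return; // consumed the last page
      }
      last = page.getLastName(); // resume token for the next page
    }
  }
}
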
[39/50] [abbrv] hadoop git commit: HDFS-13165: [SPS]: Collects successfully moved block details via IBR. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13165: [SPS]: Collects successfully moved block details via IBR. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2acc50b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2acc50b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2acc50b8

Branch: refs/heads/trunk
Commit: 2acc50b826fa8b00f2b09d9546c4b3215b89d46d
Parents: 75ccc13
Author: Rakesh Radhakrishnan 
Authored: Sun Apr 29 11:06:59 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:05 2018 -0700

--
 .../DatanodeProtocolClientSideTranslatorPB.java |  11 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  25 ---
 .../server/blockmanagement/BlockManager.java|  86 +-
 .../sps/BlockMovementAttemptFinished.java   |  24 ++-
 .../common/sps/BlockStorageMovementTracker.java | 109 +---
 .../sps/BlocksMovementsStatusHandler.java   |  70 +---
 .../hdfs/server/datanode/BPServiceActor.java|  14 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   7 +-
 .../datanode/StoragePolicySatisfyWorker.java|  48 ++
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  13 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   8 +-
 .../hdfs/server/namenode/FSDirectory.java   |   5 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  30 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  19 ++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  46 +++--
 .../sps/BlockStorageMovementAttemptedItems.java | 167 +--
 .../hdfs/server/namenode/sps/SPSService.java|  19 ++-
 .../namenode/sps/StoragePolicySatisfier.java| 154 +++--
 .../hdfs/server/protocol/DatanodeProtocol.java  |   4 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|  32 ++--
 .../sps/ExternalStoragePolicySatisfier.java |   3 +-
 .../src/main/proto/DatanodeProtocol.proto   |   9 -
 .../src/main/resources/hdfs-default.xml |  41 +
 .../TestNameNodePrunesMissingStorages.java  |   4 +-
 .../datanode/InternalDataNodeTestUtils.java |   4 +-
 .../SimpleBlocksMovementsStatusHandler.java |  88 ++
 .../server/datanode/TestBPOfferService.java |  12 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   7 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../TestStoragePolicySatisfyWorker.java |  76 +
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   9 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   4 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   5 +-
 .../namenode/TestNameNodeReconfigure.java   |  17 +-
 .../TestBlockStorageMovementAttemptedItems.java |  88 ++
 .../sps/TestStoragePolicySatisfier.java |  73 ++--
 ...stStoragePolicySatisfierWithStripedFile.java |  40 +++--
 .../sps/TestExternalStoragePolicySatisfier.java |  44 ++---
 42 files changed, 776 insertions(+), 659 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2acc50b8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index dcc0705..e4125dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -48,7 +48,6 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -139,8 +138,7 @@ public class DatanodeProtocolClientSideTranslatorPB 
implements
   VolumeFailureSummary volumeFailureSummary,
   boolean requestFullBlockReportLease,
   @Nonnull 

[37/50] [abbrv] hadoop git commit: HDFS-13166: [SPS]: Implement caching mechanism to keep LIVE datanodes to minimize costly getLiveDatanodeStorageReport() calls. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13166: [SPS]: Implement caching mechanism to keep LIVE datanodes to 
minimize costly getLiveDatanodeStorageReport() calls. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75ccc139
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75ccc139
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75ccc139

Branch: refs/heads/trunk
Commit: 75ccc1396b67cdc0d4992a4af3911f9f88c2
Parents: 8467ec2
Author: Surendra Singh Lilhore 
Authored: Thu Mar 1 00:08:37 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:05 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   5 +
 .../NamenodeProtocolServerSideTranslatorPB.java |  19 --
 .../NamenodeProtocolTranslatorPB.java   |  17 -
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 -
 .../hdfs/server/namenode/sps/Context.java   |  24 +-
 .../namenode/sps/DatanodeCacheManager.java  | 121 +++
 .../namenode/sps/IntraSPSNameNodeContext.java   |  23 +-
 .../namenode/sps/StoragePolicySatisfier.java| 340 ++-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  16 -
 .../hdfs/server/sps/ExternalSPSContext.java |  32 +-
 .../src/main/proto/NamenodeProtocol.proto   |  25 --
 .../src/main/resources/hdfs-default.xml |  11 +
 .../src/site/markdown/ArchivalStorage.md|   2 +-
 .../TestStoragePolicySatisfyWorker.java |   3 +
 .../TestPersistentStoragePolicySatisfier.java   |   6 +
 .../TestStoragePolicySatisfierWithHA.java   |   3 +
 .../sps/TestStoragePolicySatisfier.java |   4 +
 ...stStoragePolicySatisfierWithStripedFile.java |  24 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |   3 +
 19 files changed, 431 insertions(+), 260 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75ccc139/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b354d64..cf383d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -646,6 +646,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   public static final String DFS_SPS_MAX_OUTSTANDING_PATHS_KEY =
   "dfs.storage.policy.satisfier.max.outstanding.paths";
   public static final int DFS_SPS_MAX_OUTSTANDING_PATHS_DEFAULT = 10000;
+  // SPS datanode cache config, defaulting to 5mins.
+  public static final String DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS =
+  "dfs.storage.policy.satisfier.datanode.cache.refresh.interval.ms";
+  public static final long DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS_DEFAULT =
+  300000L;
 
   // SPS keytab configurations, by default it is disabled.
   public static final String  DFS_SPS_ADDRESS_KEY =

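The refresh key above caps how stale the satisfier's cached live-datanode report may get before another costly getLiveDatanodeStorageReport() RPC is issued; the comment pins the default at 5 minutes (300000 ms). A generic sketch of that time-based refresh pattern (the class and the Supplier are illustrative, not the DatanodeCacheManager API):

import java.util.function.Supplier;

class TimedCache<T> {
  private final Supplier<T> loader; // wraps the expensive RPC
  private final long refreshMs;     // e.g. the 300000 ms default above
  private T cached;
  private long loadedAt = Long.MIN_VALUE;

  TimedCache(Supplier<T> loader, long refreshMs) {
    this.loader = loader;
    this.refreshMs = refreshMs;
  }

  synchronized T get() {
    long now = System.currentTimeMillis();
    if (cached == null || now - loadedAt >= refreshMs) {
      cached = loader.get(); // refresh only when the entry has gone stale
      loadedAt = now;
    }
    return cached;
  }
}
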
http://git-wip-us.apache.org/repos/asf/hadoop/blob/75ccc139/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index ed176cc..e4283c6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -23,8 +23,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -277,21 +275,4 @@ public class NamenodeProtocolServerSideTranslatorPB 

[20/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
deleted file mode 100644
index 9f733ff..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ /dev/null
@@ -1,1775 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests that StoragePolicySatisfier daemon is able to check the blocks to be
- * moved and finding its suggested target locations to move.
- */
-public class TestStoragePolicySatisfier {
-
-  {
-GenericTestUtils.setLogLevel(
-getLogger(FSTreeTraverser.class), Level.DEBUG);
-  }
-
-  private static final String ONE_SSD = "ONE_SSD";
-  private static final String COLD = "COLD";
-  private static final Logger LOG =
-  LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
-  private final Configuration config = new HdfsConfiguration();
-  private StorageType[][] allDiskTypes =
-  new StorageType[][]{{StorageType.DISK, StorageType.DISK},
-  {StorageType.DISK, StorageType.DISK},
-  {StorageType.DISK, StorageType.DISK}};
-  private MiniDFSCluster hdfsCluster = null;
-  final private int numOfDatanodes = 3;
-  

[15/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block 
storage movements. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/00eceed2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/00eceed2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/00eceed2

Branch: refs/heads/trunk
Commit: 00eceed233d6e80d5c7137bf5b5286746ec4d5fb
Parents: bfd3f8b
Author: Uma Maheswara Rao G 
Authored: Thu Oct 12 17:17:51 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:01 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../DatanodeProtocolClientSideTranslatorPB.java |  12 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 150 +++-
 .../blockmanagement/DatanodeDescriptor.java |  50 ++-
 .../server/blockmanagement/DatanodeManager.java | 104 --
 .../hdfs/server/datanode/BPOfferService.java|   3 +-
 .../hdfs/server/datanode/BPServiceActor.java|  33 +-
 .../datanode/BlockStorageMovementTracker.java   |  80 ++---
 .../datanode/StoragePolicySatisfyWorker.java| 214 
 .../BlockStorageMovementAttemptedItems.java | 299 
 .../BlockStorageMovementInfosBatch.java |  61 
 .../hdfs/server/namenode/FSNamesystem.java  |  11 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +-
 .../server/namenode/StoragePolicySatisfier.java | 343 ++-
 .../protocol/BlockStorageMovementCommand.java   |  99 ++
 .../BlocksStorageMoveAttemptFinished.java   |  48 +++
 .../protocol/BlocksStorageMovementResult.java   |  74 
 .../hdfs/server/protocol/DatanodeProtocol.java  |   5 +-
 .../src/main/proto/DatanodeProtocol.proto   |  30 +-
 .../src/main/resources/hdfs-default.xml |  21 +-
 .../src/site/markdown/ArchivalStorage.md|   6 +-
 .../TestNameNodePrunesMissingStorages.java  |   5 +-
 .../datanode/InternalDataNodeTestUtils.java |   4 +-
 .../server/datanode/TestBPOfferService.java |   4 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   6 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../TestStoragePolicySatisfyWorker.java |  52 ++-
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   6 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   4 +-
 .../TestBlockStorageMovementAttemptedItems.java | 145 
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 .../namenode/TestStoragePolicySatisfier.java| 115 ++-
 ...stStoragePolicySatisfierWithStripedFile.java |  20 +-
 37 files changed, 908 insertions(+), 1135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/00eceed2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c90ca33..d577e4c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -629,11 +629,15 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =
-  5 * 60 * 1000;
+  1 * 60 * 1000;
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.self.retry.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT =
-  20 * 60 * 1000;
+  5 * 60 * 1000;
+  public static final String 
DFS_STORAGE_POLICY_SATISFIER_SHARE_EQUAL_REPLICA_MAX_STREAMS_KEY =
+  "dfs.storage.policy.satisfier.low.max-streams.preference";
+  public static final boolean 
DFS_STORAGE_POLICY_SATISFIER_SHARE_EQUAL_REPLICA_MAX_STREAMS_DEFAULT =
+  false;
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = 
"dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;
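[Editor's sketch] The hunk above tightens the SPS recheck and self-retry defaults and adds a max-streams preference flag. As a minimal sketch (not part of the patch), these keys could be overridden through the standard Configuration API; the key strings come from the hunk, while the values here are merely illustrative:

import org.apache.hadoop.conf.Configuration;

public class SpsTimeoutTuningSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Re-check processed block movement command results after 1 minute,
    // matching the new default above.
    conf.setInt("dfs.storage.policy.satisfier.recheck.timeout.millis",
        1 * 60 * 1000);
    // Self-retry if no movement result arrives within 5 minutes.
    conf.setInt("dfs.storage.policy.satisfier.self.retry.timeout.millis",
        5 * 60 * 1000);
    // Equal-replica max-streams preference, disabled by default.
    conf.setBoolean("dfs.storage.policy.satisfier.low.max-streams.preference",
        false);
    System.out.println(conf.getInt(
        "dfs.storage.policy.satisfier.recheck.timeout.millis", -1));
  }
}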

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00eceed2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

[45/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39ed3a66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39ed3a66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39ed3a66

Branch: refs/heads/trunk
Commit: 39ed3a66dbb01383ed16b141183fc48bfd2e613d
Parents: dfcb331
Author: Uma Maheswara Rao G 
Authored: Mon Jul 23 16:05:35 2018 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:06 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   26 -
 .../hadoop/hdfs/protocol/ClientProtocol.java|   29 -
 .../hadoop/hdfs/protocol/HdfsConstants.java |   40 -
 .../ClientNamenodeProtocolTranslatorPB.java |   36 -
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |   37 -
 .../src/main/proto/ClientNamenodeProtocol.proto |   26 -
 .../federation/router/RouterRpcServer.java  |   14 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |4 -
 ...tNamenodeProtocolServerSideTranslatorPB.java |   39 -
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   99 -
 .../hadoop/hdfs/server/balancer/ExitStatus.java |3 +-
 .../server/blockmanagement/BlockManager.java|   21 +-
 .../blockmanagement/DatanodeDescriptor.java |   68 -
 .../server/blockmanagement/DatanodeManager.java |   94 +-
 .../hdfs/server/datanode/BPOfferService.java|   12 -
 .../hadoop/hdfs/server/datanode/DataNode.java   |   12 -
 .../datanode/StoragePolicySatisfyWorker.java|  217 ---
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   21 -
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   12 -
 .../hdfs/server/namenode/FSNamesystem.java  |8 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   36 -
 .../sps/BlockStorageMovementNeeded.java |  121 +-
 .../hdfs/server/namenode/sps/Context.java   |5 -
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   63 -
 .../namenode/sps/IntraSPSNameNodeContext.java   |  189 --
 .../sps/IntraSPSNameNodeFileIdCollector.java|  185 --
 .../hdfs/server/namenode/sps/SPSService.java|5 -
 .../namenode/sps/StoragePolicySatisfier.java|   44 -
 .../sps/StoragePolicySatisfyManager.java|  156 +-
 .../hdfs/server/sps/ExternalSPSContext.java |5 -
 .../sps/ExternalStoragePolicySatisfier.java |9 -
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   87 +-
 .../src/main/proto/DatanodeProtocol.proto   |   30 -
 .../src/main/resources/hdfs-default.xml |   14 +-
 .../src/site/markdown/ArchivalStorage.md|   22 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   40 +
 .../server/datanode/TestBPOfferService.java |4 -
 .../TestStoragePolicySatisfyWorker.java |  241 ---
 .../hadoop/hdfs/server/mover/TestMover.java |7 +-
 .../namenode/TestNameNodeReconfigure.java   |   32 +-
 .../TestPersistentStoragePolicySatisfier.java   |  124 +-
 .../TestStoragePolicySatisfierWithHA.java   |  152 +-
 .../TestBlockStorageMovementAttemptedItems.java |3 +-
 .../sps/TestStoragePolicySatisfier.java | 1825 --
 ...stStoragePolicySatisfierWithStripedFile.java |   87 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 1433 +-
 .../hdfs/tools/TestStoragePolicyCommands.java   |2 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |   56 +-
 48 files changed, 1517 insertions(+), 4278 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index b6f9bdd..adbb133 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -123,7 +123,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
-import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -3110,10 +3109,6 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public boolean 

[42/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 18acb50..d9a93fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -32,34 +32,57 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_KEYTAB_FILE_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_MAX_OUTSTANDING_PATHS_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.net.URI;
+import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.sps.BlockMovementListener;
 import 
org.apache.hadoop.hdfs.server.namenode.sps.BlockStorageMovementAttemptedItems;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
-import org.apache.hadoop.hdfs.server.namenode.sps.TestStoragePolicySatisfier;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.SecurityUtil;
@@ -67,29 +90,57 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
 
 /**
  * Tests the external sps service plugins.
  */
-public class TestExternalStoragePolicySatisfier
-extends TestStoragePolicySatisfier {
+public class TestExternalStoragePolicySatisfier {
+  private static final String ONE_SSD = "ONE_SSD";
+  private static final String COLD = "COLD";
   private StorageType[][] allDiskTypes =
  

[43/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
index f85769f..f48521b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import 
org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.StorageTypeNodePair;
+import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -52,7 +53,7 @@ public class TestBlockStorageMovementAttemptedItems {
   @Before
   public void setup() throws Exception {
 Configuration config = new HdfsConfiguration();
-Context ctxt = Mockito.mock(IntraSPSNameNodeContext.class);
+Context ctxt = Mockito.mock(ExternalSPSContext.class);
 SPSService sps = new StoragePolicySatisfier(config);
 Mockito.when(ctxt.isRunning()).thenReturn(true);
 Mockito.when(ctxt.isInSafeMode()).thenReturn(false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
deleted file mode 100644
index ec5307b..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ /dev/null
@@ -1,1825 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.sps;
-
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY;
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.Block;
-import 

[35/50] [abbrv] hadoop git commit: HDFS-13057: [SPS]: Revisit configurations to make SPS service modes internal/external/none. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13057: [SPS]: Revisit configurations to make SPS service modes 
internal/external/none. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b83110d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b83110d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b83110d

Branch: refs/heads/trunk
Commit: 3b83110d5ed582b9f913ecf3f62ce410535f8fca
Parents: b0cb8d9
Author: Uma Maheswara Rao G 
Authored: Fri Jan 26 08:57:29 2018 -0800
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:04 2018 -0700

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  39 
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +-
 .../server/blockmanagement/BlockManager.java| 105 +++---
 .../hdfs/server/namenode/FSNamesystem.java  |   6 +-
 .../hdfs/server/namenode/FSTreeTraverser.java   |   2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  34 ++--
 .../sps/BlockStorageMovementNeeded.java |   2 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |   3 +
 .../hdfs/server/namenode/sps/SPSService.java|   4 +-
 .../namenode/sps/StoragePolicySatisfier.java|  17 +-
 .../server/sps/ExternalSPSFileIDCollector.java  |  32 ++-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  16 +-
 .../src/main/resources/hdfs-default.xml |  11 +-
 .../src/site/markdown/ArchivalStorage.md|  17 +-
 .../TestStoragePolicySatisfyWorker.java |   5 +-
 .../hadoop/hdfs/server/mover/TestMover.java |  45 +++--
 .../hdfs/server/mover/TestStorageMover.java |   4 +-
 .../namenode/TestNameNodeReconfigure.java   | 105 +-
 .../TestPersistentStoragePolicySatisfier.java   |   9 +-
 .../TestStoragePolicySatisfierWithHA.java   |  12 +-
 .../sps/TestStoragePolicySatisfier.java | 202 +++
 ...stStoragePolicySatisfierWithStripedFile.java |  17 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 112 +++---
 .../hdfs/tools/TestStoragePolicyCommands.java   |   5 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |  14 +-
 25 files changed, 500 insertions(+), 327 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index aabcdd9..ab48dcd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -129,6 +129,45 @@ public final class HdfsConstants {
   }
 
   /**
+   * Storage policy satisfier service modes.
+   */
+  public enum StoragePolicySatisfierMode {
+
+/**
+ * This mode represents that SPS service is running inside Namenode and can
+ * accept any SPS call request.
+ */
+INTERNAL,
+
+/**
+ * This mode represents that SPS service is running outside Namenode as an
+ * external service and can accept any SPS call request.
+ */
+EXTERNAL,
+
+/**
+ * This mode represents that SPS service is disabled and cannot accept any
+ * SPS call request.
+ */
+NONE;
+
+private static final Map<String, StoragePolicySatisfierMode> MAP =
+new HashMap<>();
+
+static {
+  for (StoragePolicySatisfierMode a : values()) {
+MAP.put(a.name(), a);
+  }
+}
+
+/** Convert the given String to a StoragePolicySatisfierMode. */
+public static StoragePolicySatisfierMode fromString(String s) {
+  return MAP.get(StringUtils.toUpperCase(s));
+}
+  }
+
+
+  /**
* Storage policy satisfy path status.
*/
   public enum StoragePolicySatisfyPathStatus {
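[Editor's sketch] A hedged usage example (not from the patch) of the new mode enum added above: fromString() resolves a configured mode string case-insensitively and returns null for unknown values, as the MAP lookup shows.

import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;

public class SpsModeParseSketch {
  public static void main(String[] args) {
    // "internal" is upper-cased by fromString(), so it maps to INTERNAL.
    StoragePolicySatisfierMode mode =
        StoragePolicySatisfierMode.fromString("internal");
    System.out.println(mode); // INTERNAL
    // An unrecognized string yields null rather than throwing.
    System.out.println(StoragePolicySatisfierMode.fromString("bogus")); // null
  }
}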

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f1a59d3..bf29d14 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import 

[29/50] [abbrv] hadoop git commit: HDFS-13033: [SPS]: Implement a mechanism to do file block movements for external SPS. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13033: [SPS]: Implement a mechanism to do file block movements for 
external SPS. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0cb8d9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0cb8d9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0cb8d9b

Branch: refs/heads/trunk
Commit: b0cb8d9bb44c963ae686d2b5c1b70bc76b955e10
Parents: 3159b39
Author: Uma Maheswara Rao G 
Authored: Tue Jan 23 16:19:46 2018 -0800
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:03 2018 -0700

--
 .../hdfs/server/balancer/NameNodeConnector.java |   8 +
 .../hdfs/server/common/sps/BlockDispatcher.java | 186 +
 .../sps/BlockMovementAttemptFinished.java   |  80 ++
 .../server/common/sps/BlockMovementStatus.java  |  53 
 .../common/sps/BlockStorageMovementTracker.java | 184 +
 .../sps/BlocksMovementsStatusHandler.java   |  95 +++
 .../hdfs/server/common/sps/package-info.java|  27 ++
 .../datanode/BlockStorageMovementTracker.java   | 186 -
 .../datanode/StoragePolicySatisfyWorker.java| 271 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |   4 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |   3 +-
 .../sps/BlockStorageMovementAttemptedItems.java |  12 +-
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   3 +-
 .../hdfs/server/namenode/sps/SPSService.java|  14 +-
 .../namenode/sps/StoragePolicySatisfier.java|  30 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java| 233 
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../sps/TestStoragePolicySatisfier.java |   6 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  69 -
 19 files changed, 997 insertions(+), 469 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0cb8d9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index b0dd779..6bfbbb3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -269,6 +269,14 @@ public class NameNodeConnector implements Closeable {
 }
   }
 
+  /**
+   * Returns fallbackToSimpleAuth. This will be true or false during calls to
+   * indicate if a secure client falls back to simple auth.
+   */
+  public AtomicBoolean getFallbackToSimpleAuth() {
+return fallbackToSimpleAuth;
+  }
+
   @Override
   public void close() {
 keyManager.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0cb8d9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
new file mode 100644
index 000..f87fcae
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common.sps;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import 

[28/50] [abbrv] hadoop git commit: HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for external/internal implementations. Contributed by Uma Maheswara Rao G

2018-08-12 Thread umamahesh
HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for 
external/internal implementations. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d4f74e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d4f74e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d4f74e7

Branch: refs/heads/trunk
Commit: 8d4f74e7339abc77dc0daa162d7bd2814bd79b3d
Parents: 05d4daf
Author: Rakesh Radhakrishnan 
Authored: Fri Jan 19 08:51:49 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:03 2018 -0700

--
 .../server/blockmanagement/BlockManager.java|  61 +-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  16 +-
 .../hdfs/server/namenode/FSDirectory.java   |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  10 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |  44 
 .../namenode/sps/BlockMovementListener.java |  40 
 .../sps/BlockStorageMovementAttemptedItems.java |  28 +--
 .../sps/BlockStorageMovementNeeded.java | 207 ---
 .../hdfs/server/namenode/sps/Context.java   |  43 ++--
 .../server/namenode/sps/FileIdCollector.java|  43 
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |  62 ++
 .../namenode/sps/IntraSPSNameNodeContext.java   |  62 ++
 .../sps/IntraSPSNameNodeFileIdCollector.java| 178 
 .../hdfs/server/namenode/sps/ItemInfo.java  |  81 
 .../hdfs/server/namenode/sps/SPSPathIds.java|  63 ++
 .../hdfs/server/namenode/sps/SPSService.java| 107 ++
 .../namenode/sps/StoragePolicySatisfier.java| 175 +++-
 .../TestBlockStorageMovementAttemptedItems.java |  19 +-
 .../sps/TestStoragePolicySatisfier.java | 111 ++
 ...stStoragePolicySatisfierWithStripedFile.java |  19 +-
 20 files changed, 938 insertions(+), 437 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d4f74e7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5ee4026..d12cb01 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -93,8 +93,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.sps.Context;
-import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
+import org.apache.hadoop.hdfs.server.namenode.sps.SPSPathIds;
+import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -434,7 +434,8 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private Context spsctxt = null;
+  private final SPSPathIds spsPaths;
+
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -481,8 +482,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-spsctxt = new IntraSPSNameNodeContext(namesystem, this, conf);
-sps = new StoragePolicySatisfier(spsctxt);
+sps = new StoragePolicySatisfier(conf);
+spsPaths = new SPSPathIds();
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5041,8 +5042,7 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-// TODO: FSDirectory will get removed via HDFS-12911 modularization work
-sps.start(false, namesystem.getFSDirectory());
+sps.start(false);
   }
 
   /**
@@ -5078,8 +5078,7 @@ public class BlockManager implements BlockStatsMXBean {
   

[11/50] [abbrv] hadoop git commit: HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e820f16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e820f16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e820f16

Branch: refs/heads/trunk
Commit: 0e820f16af309cc8476edba448dd548686431133
Parents: 3b601f2
Author: Uma Maheswara Rao G 
Authored: Thu Aug 17 13:21:07 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:01 2018 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../server/blockmanagement/BlockManager.java| 104 +++
 .../BlockStorageMovementAttemptedItems.java |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  20 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  22 ++--
 .../server/namenode/StoragePolicySatisfier.java |  20 ++--
 .../protocol/BlocksStorageMovementResult.java   |   2 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  11 +-
 .../src/main/resources/hdfs-default.xml |  10 +-
 .../src/site/markdown/ArchivalStorage.md|  14 +--
 .../src/site/markdown/HDFSCommands.md   |   2 +-
 .../TestStoragePolicySatisfyWorker.java |   2 +-
 .../hadoop/hdfs/server/mover/TestMover.java |  22 ++--
 .../hdfs/server/mover/TestStorageMover.java |   2 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../namenode/TestNameNodeReconfigure.java   |  99 --
 .../TestPersistentStoragePolicySatisfier.java   |   6 +-
 .../namenode/TestStoragePolicySatisfier.java|  35 +--
 .../TestStoragePolicySatisfierWithHA.java   |  10 +-
 ...stStoragePolicySatisfierWithStripedFile.java |   8 ++
 .../hdfs/tools/TestStoragePolicyCommands.java   |  21 ++--
 22 files changed, 265 insertions(+), 161 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e820f16/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 38be348b..bc6e7a4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -62,7 +62,7 @@ function hadoop_usage
   hadoop_add_subcommand "portmap" daemon "run a portmap service"
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary 
namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a 
directory or diff the current directory contents with a snapshot"
-  hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage 
policies"
+  hadoop_add_subcommand "storagepolicies" admin 
"list/get/set/satisfyStoragePolicy block storage policies"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e820f16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b5a2a5a..e66806f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -614,10 +614,10 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final intDFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; 
// One minute
 
   // SPS related configurations
-  public static final String  DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
-  "dfs.storage.policy.satisfier.activate";
-  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
-  true;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY =
+  "dfs.storage.policy.satisfier.enabled";
+  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
+  false;
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e820f16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

[44/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java
deleted file mode 100644
index d6e92d2..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.namenode.sps;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import 
org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-
-/**
- * This class handles the internal SPS block movements. This will assign block
- * movement tasks to target datanode descriptors.
- */
-@InterfaceAudience.Private
-public class IntraSPSNameNodeBlockMoveTaskHandler
-implements BlockMoveTaskHandler {
-
-  private BlockManager blockManager;
-  private Namesystem namesystem;
-
-  public IntraSPSNameNodeBlockMoveTaskHandler(BlockManager blockManager,
-  Namesystem namesytem) {
-this.blockManager = blockManager;
-this.namesystem = namesytem;
-  }
-
-  @Override
-  public void submitMoveTask(BlockMovingInfo blkMovingInfo) throws IOException 
{
-namesystem.readLock();
-try {
-  DatanodeDescriptor dn = blockManager.getDatanodeManager()
-  .getDatanode(blkMovingInfo.getTarget().getDatanodeUuid());
-  if (dn == null) {
-throw new IOException("Failed to schedule block movement task:"
-+ blkMovingInfo + " as target datanode: "
-+ blkMovingInfo.getTarget() + " doesn't exists");
-  }
-  dn.incrementBlocksScheduled(blkMovingInfo.getTargetStorageType());
-  dn.addBlocksToMoveStorage(blkMovingInfo);
-} finally {
-  namesystem.readUnlock();
-}
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java
deleted file mode 100644
index 2bf4810..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.sps;
-
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-
-import java.io.IOException;
-import 

[24/50] [abbrv] hadoop git commit: HDFS-12310: [SPS]: Provide an option to track the status of in progress requests. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-12310: [SPS]: Provide an option to track the status of in progress 
requests. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68017e33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68017e33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68017e33

Branch: refs/heads/trunk
Commit: 68017e3349e3b71a9c49f2ccea2558231ff8485d
Parents: 5780f06
Author: Rakesh Radhakrishnan 
Authored: Fri Nov 3 08:18:14 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:02 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  22 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  21 
 .../hadoop/hdfs/protocol/HdfsConstants.java |  27 +
 .../ClientNamenodeProtocolTranslatorPB.java |  20 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  33 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  17 ++-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  23 +++-
 .../server/blockmanagement/BlockManager.java|  12 ++
 .../namenode/BlockStorageMovementNeeded.java| 109 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 ++-
 .../server/namenode/StoragePolicySatisfier.java |   8 ++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  35 +-
 .../src/site/markdown/ArchivalStorage.md|   3 +-
 .../TestPersistentStoragePolicySatisfier.java   |   2 +-
 .../namenode/TestStoragePolicySatisfier.java|  67 
 .../hdfs/tools/TestStoragePolicyCommands.java   |  18 +++
 16 files changed, 424 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68017e33/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7337aa2..471ab2c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -123,6 +123,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -3169,4 +3170,25 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 checkOpen();
 return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
   }
+
+  /**
+   * Check the storage policy satisfy status of the path for which
+   * {@link DFSClient#satisfyStoragePolicy(String)} is called.
+   *
+   * @return Storage policy satisfy status.
+   * 
+   * PENDING if path is in queue and not processed for satisfying
+   * the policy.
+   * IN_PROGRESS if satisfying the storage policy for path.
+   * SUCCESS if storage policy satisfied for the path.
+   * NOT_AVAILABLE if
+   * {@link DFSClient#satisfyStoragePolicy(String)} not called for
+   * path or SPS work is already finished.
+   * 
+   * @throws IOException
+   */
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+  String path) throws IOException {
+return namenode.checkStoragePolicySatisfyPathStatus(path);
+  }
 }
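[Editor's sketch] A minimal client-side sketch of the new status API above; the cast to DistributedFileSystem and the getClient() accessor are assumptions for illustration, and the path and poll interval are arbitrary.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;

public class SpsStatusPollSketch {
  public static void main(String[] args) throws Exception {
    Path file = new Path("/file1"); // hypothetical path
    DistributedFileSystem dfs = (DistributedFileSystem)
        file.getFileSystem(new Configuration());
    dfs.satisfyStoragePolicy(file);
    // Statuses progress roughly PENDING -> IN_PROGRESS -> SUCCESS, and
    // NOT_AVAILABLE once the finished work is purged.
    StoragePolicySatisfyPathStatus status;
    do {
      Thread.sleep(1000);
      status = dfs.getClient()
          .checkStoragePolicySatisfyPathStatus(file.toString());
    } while (status != StoragePolicySatisfyPathStatus.SUCCESS
        && status != StoragePolicySatisfyPathStatus.NOT_AVAILABLE);
    System.out.println("Final status: " + status);
  }
}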

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68017e33/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 81d7c91..360fd63 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 

[14/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00eceed2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index d3c5cb1..2f621e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -156,7 +156,7 @@ import 
org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -1517,14 +1517,15 @@ public class NameNodeRpcServer implements 
NamenodeProtocols {
   boolean requestFullBlockReportLease,
   @Nonnull SlowPeerReports slowPeers,
   @Nonnull SlowDiskReports slowDisks,
-  BlocksStorageMovementResult[] blkMovementStatus) throws IOException {
+  BlocksStorageMoveAttemptFinished storageMovementFinishedBlks)
+  throws IOException {
 checkNNStartup();
 verifyRequest(nodeReg);
 return namesystem.handleHeartbeat(nodeReg, report,
 dnCacheCapacity, dnCacheUsed, xceiverCount, xmitsInProgress,
 failedVolumes, volumeFailureSummary, requestFullBlockReportLease,
 slowPeers, slowDisks,
-blkMovementStatus);
+storageMovementFinishedBlks);
   }
 
   @Override // DatanodeProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00eceed2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a4372d5..a28a806 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.util.Time.monotonicNow;
+
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -44,7 +46,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import 
org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.util.Daemon;
@@ -82,25 +84,38 @@ public class StoragePolicySatisfier implements Runnable {
   /**
* Represents the collective analysis status for all blocks.
*/
-  private enum BlocksMovingAnalysisStatus {
-// Represents that, the analysis skipped due to some conditions. A such
-// condition is if block collection is in incomplete state.
-ANALYSIS_SKIPPED_FOR_RETRY,
-// Represents that, all block storage movement needed blocks found its
-// targets.
-ALL_BLOCKS_TARGETS_PAIRED,
-// Represents that, only fewer or none of the block storage movement needed
-// block found its eligible targets.
-FEW_BLOCKS_TARGETS_PAIRED,
-// Represents that, none of the blocks found for block storage movements.
-BLOCKS_ALREADY_SATISFIED,
-// Represents that, the analysis skipped due to some conditions.
-// Example conditions are if no blocks really exists in block collection or
-// if analysis is not required on ec files with unsuitable storage policies
-BLOCKS_TARGET_PAIRING_SKIPPED,
-// Represents that, All the reported blocks are satisfied the policy but
-// some of the blocks are low redundant.
-FEW_LOW_REDUNDANCY_BLOCKS
+  private 

[02/50] [abbrv] hadoop git commit: HDFS-11883: [SPS] : Handle NPE in BlockStorageMovementTracker when dropSPSWork() called. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-11883: [SPS] : Handle NPE in BlockStorageMovementTracker when 
dropSPSWork() called. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e53f89cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e53f89cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e53f89cc

Branch: refs/heads/trunk
Commit: e53f89ccc361615b254e3ecd270728573908c071
Parents: 5ce332d
Author: Uma Maheswara Rao G 
Authored: Tue May 30 18:12:17 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:05:59 2018 -0700

--
 .../hdfs/server/datanode/BlockStorageMovementTracker.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e53f89cc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index 99858bc..c7e952b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -88,13 +88,17 @@ public class BlockStorageMovementTracker implements 
Runnable {
   long trackId = result.getTrackId();
  List<Future<BlockMovementResult>> blocksMoving = moverTaskFutures
   .get(trackId);
+  if (blocksMoving == null) {
+LOG.warn("Future task doesn't exist for trackId " + trackId);
+continue;
+  }
   blocksMoving.remove(future);
 
  List<BlockMovementResult> resultPerTrackIdList =
   addMovementResultToTrackIdList(result);
 
   // Completed all the scheduled blocks movement under this 'trackId'.
-  if (blocksMoving.isEmpty()) {
+  if (blocksMoving.isEmpty() || moverTaskFutures.get(trackId) == null) 
{
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }
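[Editor's sketch] The added null check guards against dropSPSWork() clearing moverTaskFutures while results are still draining. A self-contained sketch of the same defensive pattern, with purely illustrative names:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class GuardedTrackerSketch {
  private final Map<Long, List<String>> tasksByTrackId = new HashMap<>();

  void handleResult(long trackId, String finishedTask) {
    List<String> tasks = tasksByTrackId.get(trackId);
    if (tasks == null) {
      // Tracking state was dropped concurrently; nothing left to update.
      return;
    }
    tasks.remove(finishedTask);
    if (tasks.isEmpty()) {
      // Completed all scheduled tasks under this trackId.
      synchronized (tasksByTrackId) {
        tasksByTrackId.remove(trackId);
      }
    }
  }
}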





[04/50] [abbrv] hadoop git commit: HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G

2018-08-12 Thread umamahesh
HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68af4e19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68af4e19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68af4e19

Branch: refs/heads/trunk
Commit: 68af4e199a754ca6c727b844a22ecabe9dc7cc68
Parents: 5eb24ef
Author: Rakesh Radhakrishnan 
Authored: Fri Jul 14 22:36:09 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../src/site/markdown/ArchivalStorage.md| 51 ++--
 1 file changed, 48 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68af4e19/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index a56cf8b..9098616 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -97,8 +97,44 @@ The effective storage policy can be retrieved by the 
"[`storagepolicies -getStor
 
 The default storage type of a datanode storage location will be DISK if it 
does not have a storage type tagged explicitly.
 
-Mover - A New Data Migration Tool
--
+Storage Policy Based Data Movement
+--
+
+Setting a new storage policy on an already existing file/dir will change the 
policy in the Namespace, but it will not physically move the blocks across 
storage media.
+The following two options allow users to move the blocks based on the newly 
set policy. So, once a user changes/sets a new policy on a file/directory, the 
user should also perform one of the following options to achieve the desired 
data movement. Note that both options cannot run simultaneously.
+
+### Storage Policy Satisfier (SPS)
+
+When a user changes the storage policy on a file/directory, the user can call 
the `HdfsAdmin` API `satisfyStoragePolicy()` to move the blocks as per the 
newly set policy.
+The SPS daemon thread runs along with the namenode and periodically scans for 
mismatches between the newly set policy and the physical block placement. It 
only tracks the files/directories for which the user invoked 
satisfyStoragePolicy. If SPS identifies blocks to be moved for a file, it 
schedules block movement tasks to datanodes. A Coordinator DataNode (C-DN) 
tracks all block movements associated with a file and notifies the namenode 
about movement success/failure. If any movement fails, the SPS re-attempts it 
by sending a new block movement task.
+
+SPS can be activated and deactivated dynamically without restarting the 
Namenode.
+
+Detailed design documentation can be found at [Storage Policy Satisfier(SPS) 
(HDFS-10285)](https://issues.apache.org/jira/browse/HDFS-10285)
+
+* **Note**: When a user invokes the `satisfyStoragePolicy()` API on a 
directory, SPS will consider only the files immediately under that directory. 
Sub-directories won't be considered for satisfying the policy. It is the 
user's responsibility to call this API on directories recursively to track all 
files under the sub-tree.
+
+* HdfsAdmin API :
+`public void satisfyStoragePolicy(final Path path) throws IOException`
+
+* Arguments :
+
+| | |
+|: |: |
+| `path` | A path which requires blocks storage movement. |
+
+Configurations:
+
*   **dfs.storage.policy.satisfier.activate** - Used to activate or deactivate 
SPS. Configuring it to true activates SPS,
+   while false deactivates it.
+
+*   **dfs.storage.policy.satisfier.recheck.timeout.millis** - A timeout to 
re-check the processed block storage movement
+   command results from Co-ordinator Datanode.
+
*   **dfs.storage.policy.satisfier.self.retry.timeout.millis** - A timeout 
after which SPS retries if the Co-ordinator Datanode
+   reports no block movement results within this period.
+
+### Mover - A New Data Migration Tool
 
 A new data migration tool is added for archiving data. The tool is similar to 
Balancer. It periodically scans the files in HDFS to check if the block 
placement satisfies the storage policy. For the blocks violating the storage 
policy, it moves the replicas to a different storage type in order to fulfill 
the storage policy requirement. Note that it always tries to move block 
replicas within the same node whenever possible. If that is not possible (e.g. 
when a node doesn't have the target storage type) then it will copy the block 
replicas to another node over the network.
 
@@ -115,6 +151,10 @@ A new data migration 
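[Editor's sketch] To make the satisfyStoragePolicy flow documented above concrete, here is a minimal sketch, assuming a running HDFS cluster with SPS activated; the namenode URI, path, and policy name are illustrative only.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class SatisfyStoragePolicySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
    Path dir = new Path("/archive/dir");
    // Changing the policy only updates namespace metadata...
    admin.setStoragePolicy(dir, "COLD");
    // ...so explicitly ask SPS to move the blocks of the immediate files.
    admin.satisfyStoragePolicy(dir);
  }
}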

[32/50] [abbrv] hadoop git commit: HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by Surendra Singh.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4402f3f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 85a101f..47ea39f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -252,8 +252,8 @@ public class TestNameNodeReconfigure {
 // Since DFS_STORAGE_POLICY_ENABLED_KEY is disabled, SPS can't be enabled.
 assertEquals("SPS shouldn't start as "
 + DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled", false,
-nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.INTERNAL, false);
 
@@ -280,8 +280,8 @@ public class TestNameNodeReconfigure {
   fail("ReconfigurationException expected");
 } catch (ReconfigurationException e) {
   GenericTestUtils.assertExceptionContains(
-  "For enabling or disabling storage policy satisfier, we must "
-  + "pass either none/internal/external string value only",
+  "For enabling or disabling storage policy satisfier, must "
+  + "pass either internal/external/none string value only",
   e.getCause());
 }
 
@@ -301,8 +301,8 @@ public class TestNameNodeReconfigure {
 nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.EXTERNAL.toString());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-false, nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+false, nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
 StoragePolicySatisfierMode.EXTERNAL.toString(),
 nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -342,8 +342,8 @@ public class TestNameNodeReconfigure {
 nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.INTERNAL.toString());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-true, nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+true, nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
 StoragePolicySatisfierMode.INTERNAL.toString(),
 nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -353,7 +353,8 @@ public class TestNameNodeReconfigure {
   void verifySPSEnabled(final NameNode nameNode, String property,
   StoragePolicySatisfierMode expected, boolean isSatisfierRunning) {
 assertEquals(property + " has wrong value", isSatisfierRunning, nameNode
-.getNamesystem().getBlockManager().isStoragePolicySatisfierRunning());
+.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 String actual = nameNode.getConf().get(property,
 DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
 assertEquals(property + " has wrong value", expected,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4402f3f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index b84214c..9f98777 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -389,7 +389,8 @@ public class TestPersistentStoragePolicySatisfier {
   fs.setStoragePolicy(testFile, ONE_SSD);
   fs.satisfyStoragePolicy(testFile);
 
-  cluster.getNamesystem().getBlockManager().disableSPS();
+  

[10/50] [abbrv] hadoop git commit: HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ea24fc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ea24fc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ea24fc0

Branch: refs/heads/trunk
Commit: 7ea24fc06c081e2ba6f5f66d212abb14b80c9064
Parents: 0e820f1
Author: Uma Maheswara Rao G 
Authored: Wed Aug 23 15:37:03 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:01 2018 -0700

--
 .../server/blockmanagement/BlockManager.java|  21 +-
 .../server/blockmanagement/DatanodeManager.java |  14 +-
 .../hdfs/server/datanode/BPOfferService.java|   1 +
 .../BlockStorageMovementAttemptedItems.java |  95 +---
 .../namenode/BlockStorageMovementNeeded.java| 233 ++-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  91 +++-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  11 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../server/namenode/StoragePolicySatisfier.java | 108 ++---
 .../TestStoragePolicySatisfyWorker.java |   5 +-
 .../TestBlockStorageMovementAttemptedItems.java |  34 +--
 .../TestPersistentStoragePolicySatisfier.java   | 104 +
 .../namenode/TestStoragePolicySatisfier.java| 127 +-
 14 files changed, 589 insertions(+), 259 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ea24fc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bcc07cc..b53d946 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,7 +89,6 @@ import 
org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementNeeded;
 import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -431,9 +430,6 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private final BlockStorageMovementNeeded storageMovementNeeded =
-  new BlockStorageMovementNeeded();
-
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -480,8 +476,7 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
-conf);
+sps = new StoragePolicySatisfier(namesystem, this, conf);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5017,20 +5012,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Set file block collection for which storage movement needed for its 
blocks.
-   *
-   * @param id
-   *  - file block collection id.
-   */
-  public void satisfyStoragePolicy(long id) {
-storageMovementNeeded.add(id);
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Added block collection id {} to block "
-  + "storageMovementNeeded queue", id);
-}
-  }
-
-  /**
* Gets the storage policy satisfier instance.
*
* @return sps

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ea24fc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 

[12/50] [abbrv] hadoop git commit: HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy storage policy of all the files under the given dir. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy 
storage policy of all the files under the given dir. Contributed by Surendra 
Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bfd3f8bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bfd3f8bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bfd3f8bd

Branch: refs/heads/trunk
Commit: bfd3f8bd8a9ae2186ec3e4addc71f912ec7b8923
Parents: 7ea24fc
Author: Uma Maheswara Rao G 
Authored: Sat Sep 30 06:31:52 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:01 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  22 +-
 .../BlockStorageMovementAttemptedItems.java |   8 +-
 .../namenode/BlockStorageMovementNeeded.java| 277 +++---
 .../server/namenode/ReencryptionHandler.java|   1 +
 .../server/namenode/StoragePolicySatisfier.java |  43 ++-
 .../src/main/resources/hdfs-default.xml |  23 ++
 .../src/site/markdown/ArchivalStorage.md|   3 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../TestPersistentStoragePolicySatisfier.java   |   8 +-
 .../namenode/TestStoragePolicySatisfier.java| 377 ++-
 11 files changed, 689 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd3f8bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e66806f..c90ca33 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -618,6 +618,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.storage.policy.satisfier.enabled";
   public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
   false;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY =
+  "dfs.storage.policy.satisfier.queue.limit";
+  public static final int  DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_DEFAULT =
+  1000;
+  public static final String DFS_SPS_WORK_MULTIPLIER_PER_ITERATION =
+  "dfs.storage.policy.satisfier.work.multiplier.per.iteration";
+  public static final int DFS_SPS_WORK_MULTIPLIER_PER_ITERATION_DEFAULT =
+  1;
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd3f8bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index f5ceeaf..c26599c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1457,7 +1457,27 @@ public class DFSUtil {
 "It should be a positive, non-zero integer value.");
 return blocksReplWorkMultiplier;
   }
-  
+
+  /**
+   * Get DFS_SPS_WORK_MULTIPLIER_PER_ITERATION from
+   * configuration.
+   *
+   * @param conf Configuration
+   * @return Value of DFS_SPS_WORK_MULTIPLIER_PER_ITERATION
+   */
+  public static int getSPSWorkMultiplier(Configuration conf) {
+int spsWorkMultiplier = conf
+.getInt(
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION,
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
+Preconditions.checkArgument(
+(spsWorkMultiplier > 0),
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION +
+" = '" + spsWorkMultiplier + "' is invalid. " +
+"It should be a positive, non-zero integer value.");
+return spsWorkMultiplier;
+  }
+
   /**
* Get SPNEGO keytab Key from configuration
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd3f8bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 

[19/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
deleted file mode 100644
index 6991ad2..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ /dev/null
@@ -1,580 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests that StoragePolicySatisfier daemon is able to check the striped blocks
- * to be moved and finding its expected target locations in order to satisfy 
the
- * storage policy.
- */
-public class TestStoragePolicySatisfierWithStripedFile {
-
-  private static final Logger LOG = LoggerFactory
-  .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
-
-  private final int stripesPerBlock = 2;
-
-  private ErasureCodingPolicy ecPolicy;
-  private int dataBlocks;
-  private int parityBlocks;
-  private int cellSize;
-  private int defaultStripeBlockSize;
-
-  private ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
-  }
-
-  /**
-   * Initialize erasure coding policy.
-   */
-  @Before
-  public void init(){
-ecPolicy = getEcPolicy();
-dataBlocks = ecPolicy.getNumDataUnits();
-parityBlocks = ecPolicy.getNumParityUnits();
-cellSize = ecPolicy.getCellSize();
-defaultStripeBlockSize = cellSize * stripesPerBlock;
-  }
-
-  /**
-   * Tests to verify that all the striped blocks(data + parity blocks) are
-   * moving to satisfy the storage policy.
-   */
-  @Test(timeout = 30)
-  public void testMoverWithFullStripe() throws Exception {
-// start 10 datanodes
-int numOfDatanodes = 10;
-int storagesPerDatanode = 2;
-long capacity = 20 * defaultStripeBlockSize;
-long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
-for (int i = 0; i < numOfDatanodes; i++) {
-  for (int j = 0; j < storagesPerDatanode; j++) {
-capacities[i][j] = capacity;
-  }
-}
-
-final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
-initConfWithStripe(conf, defaultStripeBlockSize);
-final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-.numDataNodes(numOfDatanodes)
-.storagesPerDatanode(storagesPerDatanode)
-   

[38/50] [abbrv] hadoop git commit: HDFS-13165: [SPS]: Collects successfully moved block details via IBR. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2acc50b8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
index 7580ba9..f5225d2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
@@ -20,13 +20,10 @@ package org.apache.hadoop.hdfs.server.sps;
 
 import java.io.IOException;
 import java.net.Socket;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -39,7 +36,6 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
@@ -48,15 +44,14 @@ import 
org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.balancer.KeyManager;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
+import org.apache.hadoop.hdfs.server.common.sps.BlockDispatcher;
 import org.apache.hadoop.hdfs.server.common.sps.BlockMovementAttemptFinished;
 import org.apache.hadoop.hdfs.server.common.sps.BlockMovementStatus;
 import org.apache.hadoop.hdfs.server.common.sps.BlockStorageMovementTracker;
 import org.apache.hadoop.hdfs.server.common.sps.BlocksMovementsStatusHandler;
-import org.apache.hadoop.hdfs.server.common.sps.BlockDispatcher;
 import org.apache.hadoop.hdfs.server.namenode.sps.BlockMoveTaskHandler;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import 
org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
@@ -105,12 +100,14 @@ public class ExternalSPSBlockMoveTaskHandler implements 
BlockMoveTaskHandler {
 int ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);
 blkDispatcher = new BlockDispatcher(HdfsConstants.READ_TIMEOUT,
 ioFileBufferSize, connectToDnViaHostname);
+
+startMovementTracker();
   }
 
   /**
* Initializes block movement tracker daemon and starts the thread.
*/
-  public void init() {
+  private void startMovementTracker() {
 movementTrackerThread = new Daemon(this.blkMovementTracker);
 movementTrackerThread.setName("BlockStorageMovementTracker");
 movementTrackerThread.start();
@@ -156,24 +153,16 @@ public class ExternalSPSBlockMoveTaskHandler implements 
BlockMoveTaskHandler {
 // dn.incrementBlocksScheduled(blkMovingInfo.getTargetStorageType());
 LOG.debug("Received BlockMovingTask {}", blkMovingInfo);
 BlockMovingTask blockMovingTask = new BlockMovingTask(blkMovingInfo);
-Future moveCallable = mCompletionServ
-.submit(blockMovingTask);
-blkMovementTracker.addBlock(blkMovingInfo.getBlock(), moveCallable);
+mCompletionServ.submit(blockMovingTask);
   }
 
   private class ExternalBlocksMovementsStatusHandler
-  extends BlocksMovementsStatusHandler {
+  implements BlocksMovementsStatusHandler {
 @Override
-public void handle(
-List moveAttemptFinishedBlks) {
-  List blocks = new ArrayList<>();
-  for (BlockMovementAttemptFinished item : moveAttemptFinishedBlks) {
-blocks.add(item.getBlock());
-  }
-  BlocksStorageMoveAttemptFinished blkAttempted =
-  new BlocksStorageMoveAttemptFinished(
-  blocks.toArray(new Block[blocks.size()]));
-  service.notifyStorageMovementAttemptFinishedBlks(blkAttempted);
+public void handle(BlockMovementAttemptFinished attemptedMove) {
+  service.notifyStorageMovementAttemptFinishedBlk(
+  attemptedMove.getTargetDatanode(), attemptedMove.getTargetType(),
+  attemptedMove.getBlock());
 }
   }
 
@@ -194,6 +183,7 @@ public class 

[31/50] [abbrv] hadoop git commit: HDFS-13077. [SPS]: Fix review comments of external storage policy satisfier. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13077. [SPS]: Fix review comments of external storage policy satisfier. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3de4fb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3de4fb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3de4fb2

Branch: refs/heads/trunk
Commit: d3de4fb2a084cbadab8ef91f11aa7732d3b0f308
Parents: 5845c36
Author: Surendra Singh Lilhore 
Authored: Mon Jan 29 23:59:55 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:04 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +-
 .../server/blockmanagement/BlockManager.java|  33 +++-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  15 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  41 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  11 ++
 .../hdfs/server/namenode/sps/SPSPathIds.java|   8 +-
 .../namenode/sps/StoragePolicySatisfier.java|   6 +-
 .../hdfs/server/sps/ExternalSPSContext.java |   4 +
 .../sps/ExternalStoragePolicySatisfier.java |  30 ++-
 .../sps/TestStoragePolicySatisfier.java |   7 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 195 ++-
 11 files changed, 323 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3de4fb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index bf29d14..b354d64 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -614,7 +614,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_MOVER_MAX_NO_MOVE_INTERVAL_KEY = 
"dfs.mover.max-no-move-interval";
   public static final intDFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; 
// One minute
 
-  // SPS related configurations
+  // StoragePolicySatisfier (SPS) related configurations
   public static final String  DFS_STORAGE_POLICY_SATISFIER_MODE_KEY =
   "dfs.storage.policy.satisfier.mode";
   public static final String DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT =
@@ -643,6 +643,18 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.storage.policy.satisfier.low.max-streams.preference";
   public static final boolean 
DFS_STORAGE_POLICY_SATISFIER_LOW_MAX_STREAMS_PREFERENCE_DEFAULT =
   true;
+  public static final String DFS_SPS_MAX_OUTSTANDING_PATHS_KEY =
+  "dfs.storage.policy.satisfier.max.outstanding.paths";
+  public static final int DFS_SPS_MAX_OUTSTANDING_PATHS_DEFAULT = 1;
+
+  // SPS keytab configurations, by default it is disabled.
+  public static final String  DFS_SPS_ADDRESS_KEY =
+  "dfs.storage.policy.satisfier.address";
+  public static final String  DFS_SPS_ADDRESS_DEFAULT= "0.0.0.0:0";
+  public static final String  DFS_SPS_KEYTAB_FILE_KEY =
+  "dfs.storage.policy.satisfier.keytab.file";
+  public static final String  DFS_SPS_KERBEROS_PRINCIPAL_KEY =
+  "dfs.storage.policy.satisfier.kerberos.principal";
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = 
"dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3de4fb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4ea64a3..9205910 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -439,6 +439,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final boolean storagePolicyEnabled;
   private StoragePolicySatisfierMode spsMode;
   private SPSPathIds spsPaths;
+  private final int spsOutstandingPathsLimit;
 
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
@@ -478,14 +479,16 @@ public class BlockManager implements BlockStatsMXBean {
 

[21/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
new file mode 100644
index 000..5635621
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -0,0 +1,572 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import 
org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.ItemInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser.TraverseInfo;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * A class to track the block collection IDs (inode IDs) for which physical
+ * storage movement is needed, as per the namespace and the storage reports
+ * from datanodes. It scans the pending directories for which storage movement
+ * is required, schedules the block collection IDs for movement, tracks the
+ * scheduled items, and removes the SPS xAttr from the file/directory once the
+ * movement succeeds.
+ */
+@InterfaceAudience.Private
+public class BlockStorageMovementNeeded {
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(BlockStorageMovementNeeded.class);
+
+  private final Queue storageMovementNeeded =
+  new LinkedList();
+
+  /**
+   * Map of startId and number of child's. Number of child's indicate the
+   * number of files pending to satisfy the policy.
+   */
+  private final Map pendingWorkForDirectory =
+  new HashMap();
+
+  private final Map spsStatus =
+  new ConcurrentHashMap<>();
+
+  private final Namesystem namesystem;
+
+  // List of pending dir to satisfy the policy
+  private final Queue spsDirsToBeTraveresed = new LinkedList();
+
+  private final StoragePolicySatisfier sps;
+
+  private Daemon inodeIdCollector;
+
+  private final int maxQueuedItem;
+
+  // Amount of time to cache the SUCCESS status of path before turning it to
+  // NOT_AVAILABLE.
+  private static long statusClearanceElapsedTimeMs = 30;
+
+  public BlockStorageMovementNeeded(Namesystem namesystem,
+  StoragePolicySatisfier sps, int queueLimit) {
+this.namesystem = namesystem;
+this.sps = sps;
+this.maxQueuedItem = queueLimit;
+  }
+
+  /**
+   * Add the candidate to the tracking list of items for which storage
+   * movement is expected, if necessary.
+   *
+   * @param trackInfo
+   *  - track info for satisfying the policy
+   */
+  public synchronized void add(ItemInfo trackInfo) {
+spsStatus.put(trackInfo.getStartId(),
+new StoragePolicySatisfyPathStatusInfo(
+StoragePolicySatisfyPathStatus.IN_PROGRESS));
+storageMovementNeeded.add(trackInfo);
+  }
+
+  /**
+   * Add the itemInfo to the tracking list of items for which storage
+   * movement is expected, if necessary.
+   * @param startId
+   *- start id
+  

[49/50] [abbrv] hadoop git commit: HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier file path. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier 
file path. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66e8f9b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66e8f9b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66e8f9b3

Branch: refs/heads/trunk
Commit: 66e8f9b31529226309c924226a53dead3e6fcf11
Parents: 2acc50b
Author: Uma Maheswara Rao G 
Authored: Mon Jul 2 17:22:00 2018 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:06 2018 -0700

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  2 +-
 .../NamenodeProtocolTranslatorPB.java   |  2 +-
 .../server/blockmanagement/BlockManager.java|  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  9 ---
 .../sps/BlockStorageMovementAttemptedItems.java | 72 +++--
 .../sps/BlockStorageMovementNeeded.java | 61 ++
 .../hdfs/server/namenode/sps/Context.java   | 45 ---
 .../namenode/sps/DatanodeCacheManager.java  |  4 +-
 .../hdfs/server/namenode/sps/FileCollector.java | 13 +--
 .../namenode/sps/IntraSPSNameNodeContext.java   | 54 +
 .../sps/IntraSPSNameNodeFileIdCollector.java| 14 ++--
 .../hdfs/server/namenode/sps/ItemInfo.java  | 34 
 .../hdfs/server/namenode/sps/SPSService.java| 31 +++
 .../namenode/sps/StoragePolicySatisfier.java| 61 +-
 .../sps/StoragePolicySatisfyManager.java| 20 ++---
 .../hdfs/server/protocol/NamenodeProtocol.java  |  2 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|  4 +-
 .../hdfs/server/sps/ExternalSPSContext.java | 85 
 .../sps/ExternalSPSFilePathCollector.java   | 36 +
 .../sps/ExternalStoragePolicySatisfier.java | 30 +--
 .../src/main/proto/NamenodeProtocol.proto   |  2 +-
 .../TestBlockStorageMovementAttemptedItems.java | 16 ++--
 .../sps/TestStoragePolicySatisfier.java | 66 +--
 ...stStoragePolicySatisfierWithStripedFile.java | 41 --
 .../sps/TestExternalStoragePolicySatisfier.java | 35 +++-
 27 files changed, 346 insertions(+), 414 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66e8f9b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index e4283c6..d9367fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -265,7 +265,7 @@ public class NamenodeProtocolServerSideTranslatorPB 
implements
   RpcController controller, GetNextSPSPathRequestProto request)
   throws ServiceException {
 try {
-  String nextSPSPath = impl.getNextSPSPath();
+  Long nextSPSPath = impl.getNextSPSPath();
   if (nextSPSPath == null) {
 return GetNextSPSPathResponseProto.newBuilder().build();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66e8f9b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
index 97dee9b..3bd5986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
@@ -267,7 +267,7 @@ public class NamenodeProtocolTranslatorPB implements 
NamenodeProtocol,
   }
 
   @Override
-  public String getNextSPSPath() throws IOException {
+  public Long getNextSPSPath() throws IOException {
 GetNextSPSPathRequestProto req =
 GetNextSPSPathRequestProto.newBuilder().build();
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66e8f9b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

[36/50] [abbrv] hadoop git commit: HDFS-13050: [SPS]: Create start/stop script to start external SPS process. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-13050: [SPS]: Create start/stop script to start external SPS process. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5845c36c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5845c36c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5845c36c

Branch: refs/heads/trunk
Commit: 5845c36c16c423107183287cce3be9357dad7564
Parents: 99594b4
Author: Rakesh Radhakrishnan 
Authored: Mon Jan 29 03:10:48 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:04 2018 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   5 +
 .../server/blockmanagement/BlockManager.java|   9 ++
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../hdfs/server/namenode/sps/Context.java   |   5 -
 .../namenode/sps/IntraSPSNameNodeContext.java   |   4 -
 .../sps/IntraSPSNameNodeFileIdCollector.java|  12 +-
 .../hdfs/server/namenode/sps/SPSPathIds.java|   1 +
 .../namenode/sps/StoragePolicySatisfier.java|  83 +++-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|   2 +-
 .../hdfs/server/sps/ExternalSPSContext.java |  57 +---
 .../server/sps/ExternalSPSFileIDCollector.java  |  12 +-
 .../sps/ExternalStoragePolicySatisfier.java | 130 +++
 .../src/site/markdown/ArchivalStorage.md|  10 +-
 .../sps/TestStoragePolicySatisfier.java |  22 ++--
 .../sps/TestExternalStoragePolicySatisfier.java |  33 +++--
 15 files changed, 259 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5845c36c/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index bc6e7a4..94426a5 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -63,6 +63,7 @@ function hadoop_usage
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary 
namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a 
directory or diff the current directory contents with a snapshot"
   hadoop_add_subcommand "storagepolicies" admin 
"list/get/set/satisfyStoragePolicy block storage policies"
+  hadoop_add_subcommand "sps" daemon "run external storagepolicysatisfier"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
@@ -201,6 +202,10 @@ function hdfscmd_case
 storagepolicies)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
 ;;
+sps)
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+  
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.sps.ExternalStoragePolicySatisfier
+;;
 version)
   HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5845c36c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ac6d44b..4ea64a3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -94,6 +94,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import 
org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeBlockMoveTaskHandler;
+import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
+import 
org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeFileIdCollector;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSPathIds;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
@@ -5106,9 +5109,15 @@ public class BlockManager implements BlockStatsMXBean {
   return;
 }
 updateSPSMode(StoragePolicySatisfierMode.INTERNAL);
+sps.init(new IntraSPSNameNodeContext(this.namesystem, this, sps),
+new IntraSPSNameNodeFileIdCollector(this.namesystem.getFSDirectory(),
+sps),
+

[18/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
new file mode 100644
index 000..8dc52dc
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -0,0 +1,1779 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that the StoragePolicySatisfier daemon is able to identify the blocks
+ * to be moved and to find suggested target locations for moving them.
+ */
+public class TestStoragePolicySatisfier {
+
+  {
+GenericTestUtils.setLogLevel(
+getLogger(FSTreeTraverser.class), Level.DEBUG);
+  }
+
+  private static final String ONE_SSD = "ONE_SSD";
+  private static final String COLD = "COLD";
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
+  private final Configuration config = new HdfsConfiguration();
+  private StorageType[][] allDiskTypes =
+ 

[47/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Resolve conflicts after rebasing HDFS-10285 branch to trunk. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13076: [SPS]: Resolve conflicts after rebasing HDFS-10285 branch to trunk. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db3f227d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db3f227d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db3f227d

Branch: refs/heads/trunk
Commit: db3f227d8aeeea8b5bb473fed9ca4f6a17b0fca5
Parents: 66e8f9b
Author: Rakesh Radhakrishnan 
Authored: Thu Jul 5 10:10:13 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:06 2018 -0700

--
 .../hdfs/server/federation/router/RouterNamenodeProtocol.java | 6 ++
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java | 7 +++
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java  | 2 +-
 .../server/namenode/sps/IntraSPSNameNodeFileIdCollector.java  | 4 ++--
 4 files changed, 16 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db3f227d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
index 0433650..edfb391 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
@@ -184,4 +184,10 @@ public class RouterNamenodeProtocol implements 
NamenodeProtocol {
 rpcServer.checkOperation(OperationCategory.READ, false);
 return false;
   }
+
+  @Override
+  public Long getNextSPSPath() throws IOException {
+// not supported
+return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db3f227d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index d93f99d..36645c9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2509,4 +2509,11 @@ public class RouterRpcServer extends AbstractService
 checkOperation(OperationCategory.READ, false);
 return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
   }
+
+  @Override
+  public Long getNextSPSPath() throws IOException {
+checkOperation(OperationCategory.READ, false);
+// not supported
+return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db3f227d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bae6b4e..bb63f2a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -5078,7 +5078,7 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
 String modeVal = spsMode;
-if (org.apache.commons.lang.StringUtils.isBlank(modeVal)) {
+if (org.apache.commons.lang3.StringUtils.isBlank(modeVal)) {
   modeVal = conf.get(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
   DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db3f227d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeFileIdCollector.java
--
diff --git 

[07/50] [abbrv] hadoop git commit: HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to improve thread cleanup time. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to 
improve thread cleanup time. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b601f2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b601f2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b601f2c

Branch: refs/heads/trunk
Commit: 3b601f2c0e16b84e35ebe5ecdcd06d3277eabb74
Parents: 4bcf61c
Author: Uma Maheswara Rao G 
Authored: Wed Jul 19 00:55:26 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../datanode/BlockStorageMovementTracker.java   | 16 
 .../server/datanode/StoragePolicySatisfyWorker.java |  5 +++--
 2 files changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b601f2c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index c7e952b..f3d2bb6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -77,7 +77,8 @@ public class BlockStorageMovementTracker implements Runnable {
 moverTaskFutures.wait(2000);
   }
 } catch (InterruptedException ignore) {
-  // ignore
+  // Sets interrupt flag of this thread.
+  Thread.currentThread().interrupt();
 }
   }
   try {
@@ -102,12 +103,19 @@ public class BlockStorageMovementTracker implements 
Runnable {
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }
-// handle completed or inprogress blocks movements per trackId.
-blksMovementsStatusHandler.handle(resultPerTrackIdList);
+if (running) {
+  // handle completed or inprogress blocks movements per trackId.
+  blksMovementsStatusHandler.handle(resultPerTrackIdList);
+}
 movementResults.remove(trackId);
   }
 }
-  } catch (ExecutionException | InterruptedException e) {
+  } catch (InterruptedException e) {
+if (running) {
+  LOG.error("Exception while moving block replica to target storage"
+  + " type", e);
+}
+  } catch (ExecutionException e) {
 // TODO: Do we need failure retries and implement the same if required.
 LOG.error("Exception while moving block replica to target storage 
type",
 e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b601f2c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 196cd58..4e57805 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -137,8 +137,8 @@ public class StoragePolicySatisfyWorker {
* thread.
*/
   void stop() {
-movementTrackerThread.interrupt();
 movementTracker.stopTracking();
+movementTrackerThread.interrupt();
   }
 
   /**
@@ -147,7 +147,8 @@ public class StoragePolicySatisfyWorker {
   void waitToFinishWorkerThread() {
 try {
   movementTrackerThread.join(3000);
-} catch (InterruptedException ie) {
+} catch (InterruptedException ignore) {
+  // ignore
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/50] [abbrv] hadoop git commit: HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to improve thread cleanup time. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to 
improve thread cleanup time. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b601f2c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b601f2c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b601f2c

Branch: refs/heads/HDFS-10285
Commit: 3b601f2c0e16b84e35ebe5ecdcd06d3277eabb74
Parents: 4bcf61c
Author: Uma Maheswara Rao G 
Authored: Wed Jul 19 00:55:26 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../datanode/BlockStorageMovementTracker.java   | 16 
 .../server/datanode/StoragePolicySatisfyWorker.java |  5 +++--
 2 files changed, 15 insertions(+), 6 deletions(-)
--
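
In outline, the fix below swaps the two shutdown calls so the tracker's running state is cleared before the interrupt lands, letting the InterruptedException path treat the interrupt as orderly shutdown rather than an error. A minimal standalone sketch of the pattern, with simplified names rather than the actual HDFS classes:

class TrackerShutdownSketch {
  private volatile boolean running = true;
  private final Thread trackerThread = new Thread(this::trackLoop);

  void stop() {
    running = false;            // signal the loop first (stopTracking())
    trackerThread.interrupt();  // then wake it from any blocking wait
  }

  private void trackLoop() {
    while (running) {
      try {
        Thread.sleep(2000);     // stands in for moverTaskFutures.wait(2000)
      } catch (InterruptedException e) {
        // restore the interrupt flag; the loop condition decides whether
        // this was a shutdown request or a spurious interrupt
        Thread.currentThread().interrupt();
      }
    }
  }
}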


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b601f2c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index c7e952b..f3d2bb6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -77,7 +77,8 @@ public class BlockStorageMovementTracker implements Runnable {
 moverTaskFutures.wait(2000);
   }
 } catch (InterruptedException ignore) {
-  // ignore
+  // Sets interrupt flag of this thread.
+  Thread.currentThread().interrupt();
 }
   }
   try {
@@ -102,12 +103,19 @@ public class BlockStorageMovementTracker implements Runnable {
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }
-// handle completed or inprogress blocks movements per trackId.
-blksMovementsStatusHandler.handle(resultPerTrackIdList);
+if (running) {
+  // handle completed or inprogress blocks movements per trackId.
+  blksMovementsStatusHandler.handle(resultPerTrackIdList);
+}
 movementResults.remove(trackId);
   }
 }
-  } catch (ExecutionException | InterruptedException e) {
+  } catch (InterruptedException e) {
+if (running) {
+  LOG.error("Exception while moving block replica to target storage"
+  + " type", e);
+}
+  } catch (ExecutionException e) {
 // TODO: Do we need failure retries and implement the same if required.
 LOG.error("Exception while moving block replica to target storage 
type",
 e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b601f2c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 196cd58..4e57805 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -137,8 +137,8 @@ public class StoragePolicySatisfyWorker {
* thread.
*/
   void stop() {
-movementTrackerThread.interrupt();
 movementTracker.stopTracking();
+movementTrackerThread.interrupt();
   }
 
   /**
@@ -147,7 +147,8 @@ public class StoragePolicySatisfyWorker {
   void waitToFinishWorkerThread() {
 try {
   movementTrackerThread.join(3000);
-} catch (InterruptedException ie) {
+} catch (InterruptedException ignore) {
+  // ignore
 }
   }
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[46/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Addendum. Resolve conflicts after rebasing branch to trunk. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13076: [SPS]: Addendum. Resolve conflicts after rebasing branch to trunk. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dfcb331b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dfcb331b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dfcb331b

Branch: refs/heads/HDFS-10285
Commit: dfcb331ba3516264398121c9f23af3a79c0509cc
Parents: db3f227
Author: Rakesh Radhakrishnan 
Authored: Fri Jul 20 10:59:16 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:06 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/server/datanode/DataNode.java| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dfcb331b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index a714602..21af33f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3624,8 +3624,8 @@ public class DataNode extends ReconfigurableBase
 }
 return this.diskBalancer;
   }
-}
 
   StoragePolicySatisfyWorker getStoragePolicySatisfyWorker() {
 return storagePolicySatisfyWorker;
-  }}
+  }
+}


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[19/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
deleted file mode 100644
index 6991ad2..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ /dev/null
@@ -1,580 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests that StoragePolicySatisfier daemon is able to check the striped blocks
- * to be moved and finding its expected target locations in order to satisfy 
the
- * storage policy.
- */
-public class TestStoragePolicySatisfierWithStripedFile {
-
-  private static final Logger LOG = LoggerFactory
-  .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
-
-  private final int stripesPerBlock = 2;
-
-  private ErasureCodingPolicy ecPolicy;
-  private int dataBlocks;
-  private int parityBlocks;
-  private int cellSize;
-  private int defaultStripeBlockSize;
-
-  private ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
-  }
-
-  /**
-   * Initialize erasure coding policy.
-   */
-  @Before
-  public void init(){
-ecPolicy = getEcPolicy();
-dataBlocks = ecPolicy.getNumDataUnits();
-parityBlocks = ecPolicy.getNumParityUnits();
-cellSize = ecPolicy.getCellSize();
-defaultStripeBlockSize = cellSize * stripesPerBlock;
-  }
-
-  /**
-   * Tests to verify that all the striped blocks(data + parity blocks) are
-   * moving to satisfy the storage policy.
-   */
-  @Test(timeout = 30)
-  public void testMoverWithFullStripe() throws Exception {
-// start 10 datanodes
-int numOfDatanodes = 10;
-int storagesPerDatanode = 2;
-long capacity = 20 * defaultStripeBlockSize;
-long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
-for (int i = 0; i < numOfDatanodes; i++) {
-  for (int j = 0; j < storagesPerDatanode; j++) {
-capacities[i][j] = capacity;
-  }
-}
-
-final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
-initConfWithStripe(conf, defaultStripeBlockSize);
-final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-.numDataNodes(numOfDatanodes)
-.storagesPerDatanode(storagesPerDatanode)
-   

[36/50] [abbrv] hadoop git commit: HDFS-13050: [SPS]: Create start/stop script to start external SPS process. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-13050: [SPS]: Create start/stop script to start external SPS process. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5845c36c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5845c36c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5845c36c

Branch: refs/heads/HDFS-10285
Commit: 5845c36c16c423107183287cce3be9357dad7564
Parents: 99594b4
Author: Rakesh Radhakrishnan 
Authored: Mon Jan 29 03:10:48 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:04 2018 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   5 +
 .../server/blockmanagement/BlockManager.java|   9 ++
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../hdfs/server/namenode/sps/Context.java   |   5 -
 .../namenode/sps/IntraSPSNameNodeContext.java   |   4 -
 .../sps/IntraSPSNameNodeFileIdCollector.java|  12 +-
 .../hdfs/server/namenode/sps/SPSPathIds.java|   1 +
 .../namenode/sps/StoragePolicySatisfier.java|  83 +++-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|   2 +-
 .../hdfs/server/sps/ExternalSPSContext.java |  57 +---
 .../server/sps/ExternalSPSFileIDCollector.java  |  12 +-
 .../sps/ExternalStoragePolicySatisfier.java | 130 +++
 .../src/site/markdown/ArchivalStorage.md|  10 +-
 .../sps/TestStoragePolicySatisfier.java |  22 ++--
 .../sps/TestExternalStoragePolicySatisfier.java |  33 +++--
 15 files changed, 259 insertions(+), 128 deletions(-)
--
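
The hdfs script hunk below registers the new subcommand with HADOOP_SUBCMD_SUPPORTDAEMONIZATION set, so, assuming the standard Hadoop 3 shell daemon handling (an assumption; only the subcommand wiring is shown in this diff), the external satisfier would be started and stopped with commands along the lines of:

  hdfs --daemon start sps
  hdfs --daemon stop sps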


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5845c36c/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index bc6e7a4..94426a5 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -63,6 +63,7 @@ function hadoop_usage
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary 
namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a 
directory or diff the current directory contents with a snapshot"
   hadoop_add_subcommand "storagepolicies" admin 
"list/get/set/satisfyStoragePolicy block storage policies"
+  hadoop_add_subcommand "sps" daemon "run external storagepolicysatisfier"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
@@ -201,6 +202,10 @@ function hdfscmd_case
 storagepolicies)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
 ;;
+sps)
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+  
HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.sps.ExternalStoragePolicySatisfier
+;;
 version)
   HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
 ;;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5845c36c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ac6d44b..4ea64a3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -94,6 +94,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import 
org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeBlockMoveTaskHandler;
+import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
+import 
org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeFileIdCollector;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSPathIds;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
@@ -5106,9 +5109,15 @@ public class BlockManager implements BlockStatsMXBean {
   return;
 }
 updateSPSMode(StoragePolicySatisfierMode.INTERNAL);
+sps.init(new IntraSPSNameNodeContext(this.namesystem, this, sps),
+new IntraSPSNameNodeFileIdCollector(this.namesystem.getFSDirectory(),
+sps),
+

[22/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by 
Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78420719
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78420719
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78420719

Branch: refs/heads/HDFS-10285
Commit: 78420719eb1f138c6f10558befb7bc8ebcc28a54
Parents: c561cb3
Author: Uma Maheswara Rao G 
Authored: Fri Dec 22 09:10:12 2017 -0800
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:02 2018 -0700

--
 .../server/blockmanagement/BlockManager.java|6 +-
 .../BlockStorageMovementAttemptedItems.java |  241 ---
 .../namenode/BlockStorageMovementNeeded.java|  574 --
 .../hdfs/server/namenode/FSNamesystem.java  |1 +
 .../hdfs/server/namenode/IntraNNSPSContext.java |   41 +
 .../server/namenode/StoragePolicySatisfier.java |  973 --
 .../sps/BlockStorageMovementAttemptedItems.java |  241 +++
 .../sps/BlockStorageMovementNeeded.java |  572 ++
 .../namenode/sps/StoragePolicySatisfier.java|  988 ++
 .../hdfs/server/namenode/sps/package-info.java  |   28 +
 .../TestBlockStorageMovementAttemptedItems.java |  196 --
 .../namenode/TestStoragePolicySatisfier.java| 1775 -
 ...stStoragePolicySatisfierWithStripedFile.java |  580 --
 .../TestBlockStorageMovementAttemptedItems.java |  196 ++
 .../sps/TestStoragePolicySatisfier.java | 1779 ++
 ...stStoragePolicySatisfierWithStripedFile.java |  580 ++
 16 files changed, 4430 insertions(+), 4341 deletions(-)
--
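
Besides the package move, the BlockManager hunk below threads a new IntraNNSPSContext into the satisfier, a first seam between the SPS logic and NameNode internals. A hedged sketch of that indirection, with an illustrative interface shape rather than the actual definition:

interface SpsContextSketch {
  boolean isRunning();
  boolean isInSafeMode();
}

class SatisfierSketch {
  private final SpsContextSketch ctxt;

  SatisfierSketch(SpsContextSketch ctxt) {
    this.ctxt = ctxt;
  }

  void runOneCycle() {
    // consult the injected environment instead of touching Namesystem directly
    if (!ctxt.isRunning() || ctxt.isInSafeMode()) {
      return;
    }
    // ... scan queued paths and schedule block movements ...
  }
}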


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0957fe2..ec99a9f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,11 +89,12 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.IntraNNSPSContext;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -478,7 +479,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-sps = new StoragePolicySatisfier(namesystem, this, conf);
+StoragePolicySatisfier.Context spsctxt = new IntraNNSPSContext(namesystem);
+sps = new StoragePolicySatisfier(namesystem, this, conf, spsctxt);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
deleted file mode 100644
index 643255f..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with 

[11/50] [abbrv] hadoop git commit: HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0e820f16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0e820f16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0e820f16

Branch: refs/heads/HDFS-10285
Commit: 0e820f16af309cc8476edba448dd548686431133
Parents: 3b601f2
Author: Uma Maheswara Rao G 
Authored: Thu Aug 17 13:21:07 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:01 2018 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../server/blockmanagement/BlockManager.java| 104 +++
 .../BlockStorageMovementAttemptedItems.java |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  20 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  22 ++--
 .../server/namenode/StoragePolicySatisfier.java |  20 ++--
 .../protocol/BlocksStorageMovementResult.java   |   2 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  11 +-
 .../src/main/resources/hdfs-default.xml |  10 +-
 .../src/site/markdown/ArchivalStorage.md|  14 +--
 .../src/site/markdown/HDFSCommands.md   |   2 +-
 .../TestStoragePolicySatisfyWorker.java |   2 +-
 .../hadoop/hdfs/server/mover/TestMover.java |  22 ++--
 .../hdfs/server/mover/TestStorageMover.java |   2 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../namenode/TestNameNodeReconfigure.java   |  99 --
 .../TestPersistentStoragePolicySatisfier.java   |   6 +-
 .../namenode/TestStoragePolicySatisfier.java|  35 +--
 .../TestStoragePolicySatisfierWithHA.java   |  10 +-
 ...stStoragePolicySatisfierWithStripedFile.java |   8 ++
 .../hdfs/tools/TestStoragePolicyCommands.java   |  21 ++--
 22 files changed, 265 insertions(+), 161 deletions(-)
--
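
Among the review fixes, the DFSConfigKeys hunk below renames the "activate" switch to dfs.storage.policy.satisfier.enabled and flips the default to false, so deployments now opt in explicitly. A minimal, hypothetical sketch of the renamed key in use, mirroring the test configuration style seen elsewhere in this series:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class EnableSpsExample {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // default is false after this change; enable SPS explicitly
    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, true);
    System.out.println("SPS enabled: " + conf.getBoolean(
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, false));
  }
}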


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e820f16/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 38be348b..bc6e7a4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -62,7 +62,7 @@ function hadoop_usage
   hadoop_add_subcommand "portmap" daemon "run a portmap service"
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary 
namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a 
directory or diff the current directory contents with a snapshot"
-  hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage 
policies"
+  hadoop_add_subcommand "storagepolicies" admin 
"list/get/set/satisfyStoragePolicy block storage policies"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e820f16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b5a2a5a..e66806f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -614,10 +614,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final intDFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; 
// One minute
 
   // SPS related configurations
-  public static final String  DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
-  "dfs.storage.policy.satisfier.activate";
-  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
-  true;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY =
+  "dfs.storage.policy.satisfier.enabled";
+  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
+  false;
  public static final String DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
  "dfs.storage.policy.satisfier.recheck.timeout.millis";
  public static final int DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0e820f16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

[29/50] [abbrv] hadoop git commit: HDFS-13033: [SPS]: Implement a mechanism to do file block movements for external SPS. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13033: [SPS]: Implement a mechanism to do file block movements for 
external SPS. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b0cb8d9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b0cb8d9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b0cb8d9b

Branch: refs/heads/HDFS-10285
Commit: b0cb8d9bb44c963ae686d2b5c1b70bc76b955e10
Parents: 3159b39
Author: Uma Maheswara Rao G 
Authored: Tue Jan 23 16:19:46 2018 -0800
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:03 2018 -0700

--
 .../hdfs/server/balancer/NameNodeConnector.java |   8 +
 .../hdfs/server/common/sps/BlockDispatcher.java | 186 +
 .../sps/BlockMovementAttemptFinished.java   |  80 ++
 .../server/common/sps/BlockMovementStatus.java  |  53 
 .../common/sps/BlockStorageMovementTracker.java | 184 +
 .../sps/BlocksMovementsStatusHandler.java   |  95 +++
 .../hdfs/server/common/sps/package-info.java|  27 ++
 .../datanode/BlockStorageMovementTracker.java   | 186 -
 .../datanode/StoragePolicySatisfyWorker.java| 271 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |   4 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |   3 +-
 .../sps/BlockStorageMovementAttemptedItems.java |  12 +-
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   3 +-
 .../hdfs/server/namenode/sps/SPSService.java|  14 +-
 .../namenode/sps/StoragePolicySatisfier.java|  30 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java| 233 
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../sps/TestStoragePolicySatisfier.java |   6 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  69 -
 19 files changed, 997 insertions(+), 469 deletions(-)
--
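
The central abstraction here is a pluggable move-task handler: the satisfier decides what to move, the handler supplies the transfer mechanics, so the same scheduling logic can run inside the NameNode or in an external process. A hedged sketch (the real BlockMoveTaskHandler signature is not shown in this excerpt, so the shape is simplified):

interface BlockMoveTaskHandlerSketch {
  void submitMoveTask(long blockId) throws java.io.IOException;
}

class LoggingMoveHandler implements BlockMoveTaskHandlerSketch {
  @Override
  public void submitMoveTask(long blockId) {
    // a real handler would dispatch the replica transfer, e.g. through a
    // dispatcher like the BlockDispatcher class this change introduces
    System.out.println("would move block " + blockId);
  }
}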


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0cb8d9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index b0dd779..6bfbbb3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -269,6 +269,14 @@ public class NameNodeConnector implements Closeable {
 }
   }
 
+  /**
+   * Returns fallbackToSimpleAuth, which indicates whether a secure client
+   * has fallen back to simple authentication during calls.
+   */
+  public AtomicBoolean getFallbackToSimpleAuth() {
+return fallbackToSimpleAuth;
+  }
+
   @Override
   public void close() {
 keyManager.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b0cb8d9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
new file mode 100644
index 000..f87fcae
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common.sps;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import 

[02/50] [abbrv] hadoop git commit: HDFS-11883: [SPS] : Handle NPE in BlockStorageMovementTracker when dropSPSWork() called. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-11883: [SPS] : Handle NPE in BlockStorageMovementTracker when 
dropSPSWork() called. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e53f89cc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e53f89cc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e53f89cc

Branch: refs/heads/HDFS-10285
Commit: e53f89ccc361615b254e3ecd270728573908c071
Parents: 5ce332d
Author: Uma Maheswara Rao G 
Authored: Tue May 30 18:12:17 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:05:59 2018 -0700

--
 .../hdfs/server/datanode/BlockStorageMovementTracker.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--
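
The diff below guards a map lookup that can race with dropSPSWork() clearing the futures map; re-checking for null before use is the standard defensive pattern. A minimal sketch under that assumption, with simplified types rather than the actual tracker code:

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

class NullGuardSketch {
  private final Map<Long, String> moverTaskFutures = new ConcurrentHashMap<>();

  void onResult(long trackId) {
    String blocksMoving = moverTaskFutures.get(trackId);
    if (blocksMoving == null) {
      // the entry was dropped concurrently; skip instead of hitting an NPE
      return;
    }
    // ... process the in-flight movements for this trackId ...
  }
}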


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e53f89cc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index 99858bc..c7e952b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -88,13 +88,17 @@ public class BlockStorageMovementTracker implements Runnable {
   long trackId = result.getTrackId();
   List> blocksMoving = moverTaskFutures
   .get(trackId);
+  if (blocksMoving == null) {
+LOG.warn("Future task doesn't exist for trackId " + trackId);
+continue;
+  }
   blocksMoving.remove(future);
 
   List resultPerTrackIdList =
   addMovementResultToTrackIdList(result);
 
   // Completed all the scheduled blocks movement under this 'trackId'.
-  if (blocksMoving.isEmpty()) {
+  if (blocksMoving.isEmpty() || moverTaskFutures.get(trackId) == null) {
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[17/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
new file mode 100644
index 000..c1a2b8b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -0,0 +1,580 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that StoragePolicySatisfier daemon is able to check the striped blocks
+ * to be moved and finding its expected target locations in order to satisfy 
the
+ * storage policy.
+ */
+public class TestStoragePolicySatisfierWithStripedFile {
+
+  private static final Logger LOG = LoggerFactory
+  .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
+
+  private final int stripesPerBlock = 2;
+
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocks;
+  private int cellSize;
+  private int defaultStripeBlockSize;
+
+  private ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
+  /**
+   * Initialize erasure coding policy.
+   */
+  @Before
+  public void init(){
+ecPolicy = getEcPolicy();
+dataBlocks = ecPolicy.getNumDataUnits();
+parityBlocks = ecPolicy.getNumParityUnits();
+cellSize = ecPolicy.getCellSize();
+defaultStripeBlockSize = cellSize * stripesPerBlock;
+  }
+
+  /**
+   * Tests to verify that all the striped blocks(data + parity blocks) are
+   * moving to satisfy the storage policy.
+   */
+  @Test(timeout = 30)
+  public void testMoverWithFullStripe() throws Exception {
+// start 10 datanodes
+int numOfDatanodes = 10;
+int storagesPerDatanode = 2;
+long capacity = 20 * defaultStripeBlockSize;
+long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+for (int i = 0; i < numOfDatanodes; i++) {
+  for (int j = 0; j < storagesPerDatanode; j++) {
+capacities[i][j] = capacity;
+  }
+}
+
+final Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+true);
+initConfWithStripe(conf, defaultStripeBlockSize);
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(numOfDatanodes)
+

[37/50] [abbrv] hadoop git commit: HDFS-13166: [SPS]: Implement caching mechanism to keep LIVE datanodes to minimize costly getLiveDatanodeStorageReport() calls. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13166: [SPS]: Implement caching mechanism to keep LIVE datanodes to 
minimize costly getLiveDatanodeStorageReport() calls. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75ccc139
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75ccc139
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75ccc139

Branch: refs/heads/HDFS-10285
Commit: 75ccc1396b67cdc0d4992a4af3911f9f88c2
Parents: 8467ec2
Author: Surendra Singh Lilhore 
Authored: Thu Mar 1 00:08:37 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:05 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   5 +
 .../NamenodeProtocolServerSideTranslatorPB.java |  19 --
 .../NamenodeProtocolTranslatorPB.java   |  17 -
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 -
 .../hdfs/server/namenode/sps/Context.java   |  24 +-
 .../namenode/sps/DatanodeCacheManager.java  | 121 +++
 .../namenode/sps/IntraSPSNameNodeContext.java   |  23 +-
 .../namenode/sps/StoragePolicySatisfier.java| 340 ++-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  16 -
 .../hdfs/server/sps/ExternalSPSContext.java |  32 +-
 .../src/main/proto/NamenodeProtocol.proto   |  25 --
 .../src/main/resources/hdfs-default.xml |  11 +
 .../src/site/markdown/ArchivalStorage.md|   2 +-
 .../TestStoragePolicySatisfyWorker.java |   3 +
 .../TestPersistentStoragePolicySatisfier.java   |   6 +
 .../TestStoragePolicySatisfierWithHA.java   |   3 +
 .../sps/TestStoragePolicySatisfier.java |   4 +
 ...stStoragePolicySatisfierWithStripedFile.java |  24 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |   3 +
 19 files changed, 431 insertions(+), 260 deletions(-)
--
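
The caching scheme is a plain time-based refresh: reuse the last live-datanode report until the configured interval elapses, so the costly getLiveDatanodeStorageReport() call runs at most once per interval. A minimal sketch under assumed, simplified types (not the actual DatanodeCacheManager API):

import java.util.function.Supplier;

class CachedReportSketch<T> {
  private final long refreshIntervalMs;
  private final Supplier<T> fetcher;   // e.g. the expensive report RPC
  private T cached;
  private long lastFetchMs = Long.MIN_VALUE;

  CachedReportSketch(long refreshIntervalMs, Supplier<T> fetcher) {
    this.refreshIntervalMs = refreshIntervalMs;
    this.fetcher = fetcher;
  }

  synchronized T get() {
    long now = System.currentTimeMillis();
    if (cached == null || now - lastFetchMs >= refreshIntervalMs) {
      cached = fetcher.get();          // refresh at most once per interval
      lastFetchMs = now;
    }
    return cached;
  }
}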


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75ccc139/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b354d64..cf383d0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -646,6 +646,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_SPS_MAX_OUTSTANDING_PATHS_KEY =
   "dfs.storage.policy.satisfier.max.outstanding.paths";
   public static final int DFS_SPS_MAX_OUTSTANDING_PATHS_DEFAULT = 1;
+  // SPS datanode cache config, defaulting to 5mins.
+  public static final String DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS =
+  "dfs.storage.policy.satisfier.datanode.cache.refresh.interval.ms";
+  public static final long DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS_DEFAULT =
+  30L;
 
   // SPS keytab configurations, by default it is disabled.
   public static final String  DFS_SPS_ADDRESS_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75ccc139/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index ed176cc..e4283c6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -23,8 +23,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -277,21 +275,4 @@ public class 

[45/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39ed3a66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39ed3a66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39ed3a66

Branch: refs/heads/HDFS-10285
Commit: 39ed3a66dbb01383ed16b141183fc48bfd2e613d
Parents: dfcb331
Author: Uma Maheswara Rao G 
Authored: Mon Jul 23 16:05:35 2018 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:06 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   26 -
 .../hadoop/hdfs/protocol/ClientProtocol.java|   29 -
 .../hadoop/hdfs/protocol/HdfsConstants.java |   40 -
 .../ClientNamenodeProtocolTranslatorPB.java |   36 -
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |   37 -
 .../src/main/proto/ClientNamenodeProtocol.proto |   26 -
 .../federation/router/RouterRpcServer.java  |   14 -
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |4 -
 ...tNamenodeProtocolServerSideTranslatorPB.java |   39 -
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   99 -
 .../hadoop/hdfs/server/balancer/ExitStatus.java |3 +-
 .../server/blockmanagement/BlockManager.java|   21 +-
 .../blockmanagement/DatanodeDescriptor.java |   68 -
 .../server/blockmanagement/DatanodeManager.java |   94 +-
 .../hdfs/server/datanode/BPOfferService.java|   12 -
 .../hadoop/hdfs/server/datanode/DataNode.java   |   12 -
 .../datanode/StoragePolicySatisfyWorker.java|  217 ---
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   21 -
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   12 -
 .../hdfs/server/namenode/FSNamesystem.java  |8 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   36 -
 .../sps/BlockStorageMovementNeeded.java |  121 +-
 .../hdfs/server/namenode/sps/Context.java   |5 -
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   63 -
 .../namenode/sps/IntraSPSNameNodeContext.java   |  189 --
 .../sps/IntraSPSNameNodeFileIdCollector.java|  185 --
 .../hdfs/server/namenode/sps/SPSService.java|5 -
 .../namenode/sps/StoragePolicySatisfier.java|   44 -
 .../sps/StoragePolicySatisfyManager.java|  156 +-
 .../hdfs/server/sps/ExternalSPSContext.java |5 -
 .../sps/ExternalStoragePolicySatisfier.java |9 -
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   87 +-
 .../src/main/proto/DatanodeProtocol.proto   |   30 -
 .../src/main/resources/hdfs-default.xml |   14 +-
 .../src/site/markdown/ArchivalStorage.md|   22 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   40 +
 .../server/datanode/TestBPOfferService.java |4 -
 .../TestStoragePolicySatisfyWorker.java |  241 ---
 .../hadoop/hdfs/server/mover/TestMover.java |7 +-
 .../namenode/TestNameNodeReconfigure.java   |   32 +-
 .../TestPersistentStoragePolicySatisfier.java   |  124 +-
 .../TestStoragePolicySatisfierWithHA.java   |  152 +-
 .../TestBlockStorageMovementAttemptedItems.java |3 +-
 .../sps/TestStoragePolicySatisfier.java | 1825 --
 ...stStoragePolicySatisfierWithStripedFile.java |   87 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 1433 +-
 .../hdfs/tools/TestStoragePolicyCommands.java   |2 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |   56 +-
 48 files changed, 1517 insertions(+), 4278 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index b6f9bdd..adbb133 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -123,7 +123,6 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
-import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -3110,10 +3109,6 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 }
   }
 
-  public boolean 

[30/50] [abbrv] hadoop git commit: HDFS-13075. [SPS]: Provide External Context implementation. Contributed by Uma Maheswara Rao G.

2018-08-12 Thread umamahesh
HDFS-13075. [SPS]: Provide External Context implementation. Contributed by Uma 
Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99594b48
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99594b48
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99594b48

Branch: refs/heads/HDFS-10285
Commit: 99594b48b8e040ab5a0939d7c3dbcfb34400e6fc
Parents: 3b83110
Author: Surendra Singh Lilhore 
Authored: Sun Jan 28 20:46:56 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:04 2018 -0700

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  67 +
 .../NamenodeProtocolTranslatorPB.java   |  58 
 .../hdfs/server/balancer/NameNodeConnector.java |  28 +-
 .../server/blockmanagement/BlockManager.java|  19 ++
 .../server/blockmanagement/DatanodeManager.java |  18 ++
 .../hdfs/server/common/HdfsServerConstants.java |   3 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  53 +++-
 .../sps/BlockStorageMovementNeeded.java |   8 +-
 .../hdfs/server/namenode/sps/Context.java   |   9 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |  23 +-
 .../namenode/sps/StoragePolicySatisfier.java|  15 +-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  46 +++-
 .../hdfs/server/sps/ExternalSPSContext.java | 271 +++
 .../src/main/proto/NamenodeProtocol.proto   |  57 
 .../sps/TestExternalStoragePolicySatisfier.java |  31 +--
 15 files changed, 652 insertions(+), 54 deletions(-)
--
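
An external context answers the same questions the internal implementation reads from NameNode memory, but over NamenodeProtocol RPC; the new request/response protos in the diff below (GetNextSPSPathId, CheckDNSpace, HasLowRedundancyBlocks) hint at that surface. A hedged sketch of the delegation, with illustrative method names standing in for the real protocol:

import java.io.IOException;

interface SpsQueriesSketch {
  long getNextSPSPathId() throws IOException;
  boolean hasLowRedundancyBlocks(long inodeId) throws IOException;
}

class ExternalContextSketch implements SpsQueriesSketch {
  private final SpsQueriesSketch rpcProxy;  // would wrap NamenodeProtocol

  ExternalContextSketch(SpsQueriesSketch rpcProxy) {
    this.rpcProxy = rpcProxy;
  }

  public long getNextSPSPathId() throws IOException {
    return rpcProxy.getNextSPSPathId();     // served by the NameNode over RPC
  }

  public boolean hasLowRedundancyBlocks(long inodeId) throws IOException {
    return rpcProxy.hasLowRedundancyBlocks(inodeId);
  }
}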


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99594b48/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index 90c2c49..25eafdf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -33,10 +35,16 @@ import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksReq
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto;
 import 

[28/50] [abbrv] hadoop git commit: HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for external/internal implementations. Contributed by Uma Maheswara Rao G

2018-08-12 Thread umamahesh
HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for 
external/internal implementations. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d4f74e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d4f74e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d4f74e7

Branch: refs/heads/HDFS-10285
Commit: 8d4f74e7339abc77dc0daa162d7bd2814bd79b3d
Parents: 05d4daf
Author: Rakesh Radhakrishnan 
Authored: Fri Jan 19 08:51:49 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:03 2018 -0700

--
 .../server/blockmanagement/BlockManager.java|  61 +-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  16 +-
 .../hdfs/server/namenode/FSDirectory.java   |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  10 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |  44 
 .../namenode/sps/BlockMovementListener.java |  40 
 .../sps/BlockStorageMovementAttemptedItems.java |  28 +--
 .../sps/BlockStorageMovementNeeded.java | 207 ---
 .../hdfs/server/namenode/sps/Context.java   |  43 ++--
 .../server/namenode/sps/FileIdCollector.java|  43 
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |  62 ++
 .../namenode/sps/IntraSPSNameNodeContext.java   |  62 ++
 .../sps/IntraSPSNameNodeFileIdCollector.java| 178 
 .../hdfs/server/namenode/sps/ItemInfo.java  |  81 
 .../hdfs/server/namenode/sps/SPSPathIds.java|  63 ++
 .../hdfs/server/namenode/sps/SPSService.java| 107 ++
 .../namenode/sps/StoragePolicySatisfier.java| 175 +++-
 .../TestBlockStorageMovementAttemptedItems.java |  19 +-
 .../sps/TestStoragePolicySatisfier.java | 111 ++
 ...stStoragePolicySatisfierWithStripedFile.java |  19 +-
 20 files changed, 938 insertions(+), 437 deletions(-)
--
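
The shape of the modularization is visible in the BlockManager hunks below: the satisfier is now constructed from configuration alone and started without NameNode-specific arguments, with the environment injected behind service-style interfaces. An illustrative lifecycle sketch (interface and method shapes are stand-ins, not the actual SPSService definition):

interface SpsServiceSketch {
  void init(Object context);          // pluggable environment, e.g. a Context
  void start(boolean reconfigStart);  // mirrors the sps.start(false) call below
  void stopGracefully();
  boolean isRunning();
}

class SpsCoreSketch implements SpsServiceSketch {
  private volatile boolean running;
  private Object context;

  public void init(Object context) { this.context = context; }
  public void start(boolean reconfigStart) { running = true; }
  public void stopGracefully() { running = false; }
  public boolean isRunning() { return running; }
}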


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d4f74e7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5ee4026..d12cb01 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -93,8 +93,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.sps.Context;
-import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
+import org.apache.hadoop.hdfs.server.namenode.sps.SPSPathIds;
+import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -434,7 +434,8 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private Context spsctxt = null;
+  private final SPSPathIds spsPaths;
+
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -481,8 +482,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-spsctxt = new IntraSPSNameNodeContext(namesystem, this, conf);
-sps = new StoragePolicySatisfier(spsctxt);
+sps = new StoragePolicySatisfier(conf);
+spsPaths = new SPSPathIds();
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5041,8 +5042,7 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-// TODO: FSDirectory will get removed via HDFS-12911 modularization work
-sps.start(false, namesystem.getFSDirectory());
+sps.start(false);
   }
 
   /**
@@ -5078,8 +5078,7 @@ public class BlockManager implements BlockStatsMXBean {
   

[39/50] [abbrv] hadoop git commit: HDFS-13165: [SPS]: Collects successfully moved block details via IBR. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13165: [SPS]: Collects successfully moved block details via IBR. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2acc50b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2acc50b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2acc50b8

Branch: refs/heads/HDFS-10285
Commit: 2acc50b826fa8b00f2b09d9546c4b3215b89d46d
Parents: 75ccc13
Author: Rakesh Radhakrishnan 
Authored: Sun Apr 29 11:06:59 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:05 2018 -0700

--
 .../DatanodeProtocolClientSideTranslatorPB.java |  11 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  25 ---
 .../server/blockmanagement/BlockManager.java|  86 +-
 .../sps/BlockMovementAttemptFinished.java   |  24 ++-
 .../common/sps/BlockStorageMovementTracker.java | 109 +---
 .../sps/BlocksMovementsStatusHandler.java   |  70 +---
 .../hdfs/server/datanode/BPServiceActor.java|  14 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   7 +-
 .../datanode/StoragePolicySatisfyWorker.java|  48 ++
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  13 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   8 +-
 .../hdfs/server/namenode/FSDirectory.java   |   5 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  30 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  19 ++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  46 +++--
 .../sps/BlockStorageMovementAttemptedItems.java | 167 +--
 .../hdfs/server/namenode/sps/SPSService.java|  19 ++-
 .../namenode/sps/StoragePolicySatisfier.java| 154 +++--
 .../hdfs/server/protocol/DatanodeProtocol.java  |   4 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|  32 ++--
 .../sps/ExternalStoragePolicySatisfier.java |   3 +-
 .../src/main/proto/DatanodeProtocol.proto   |   9 -
 .../src/main/resources/hdfs-default.xml |  41 +
 .../TestNameNodePrunesMissingStorages.java  |   4 +-
 .../datanode/InternalDataNodeTestUtils.java |   4 +-
 .../SimpleBlocksMovementsStatusHandler.java |  88 ++
 .../server/datanode/TestBPOfferService.java |  12 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   7 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../TestStoragePolicySatisfyWorker.java |  76 +
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   9 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   4 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   5 +-
 .../namenode/TestNameNodeReconfigure.java   |  17 +-
 .../TestBlockStorageMovementAttemptedItems.java |  88 ++
 .../sps/TestStoragePolicySatisfier.java |  73 ++--
 ...stStoragePolicySatisfierWithStripedFile.java |  40 +++--
 .../sps/TestExternalStoragePolicySatisfier.java |  44 ++---
 42 files changed, 776 insertions(+), 659 deletions(-)
--
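The mechanism in this commit: rather than datanodes sending a dedicated BlocksStorageMoveAttemptFinished report, the namenode confirms scheduled moves from the ordinary incremental block reports (IBRs) that arrive when a replica lands on a new storage. A rough sketch of that confirmation pattern, using hypothetical stand-in types (MovedBlockTracker and its methods are illustrative, not classes from this patch):

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    /**
     * Sketch: remember which (block -> expected storage type) moves were
     * attempted, and mark a move complete when a normal incremental block
     * report shows the block on the expected storage type.
     */
    class MovedBlockTracker {
      /** blockId -> storage type the satisfier asked the block to land on. */
      private final Map<Long, String> pendingMoves = new ConcurrentHashMap<>();

      void recordAttempt(long blockId, String targetStorageType) {
        pendingMoves.put(blockId, targetStorageType);
      }

      /** Called from IBR processing; true when the move is confirmed. */
      boolean onIncrementalBlockReport(long blockId, String reportedStorageType) {
        String expected = pendingMoves.get(blockId);
        if (expected != null && expected.equals(reportedStorageType)) {
          pendingMoves.remove(blockId); // move confirmed via the IBR path
          return true;
        }
        return false; // unrelated report, or block still pending elsewhere
      }
    }

This removes the extra datanode-to-namenode RPC surface (note the deleted BlocksStorageMoveAttemptFinished import below) at the cost of tracking attempted moves until a matching IBR arrives.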


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2acc50b8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index dcc0705..e4125dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -48,7 +48,6 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -139,8 +138,7 @@ public class DatanodeProtocolClientSideTranslatorPB 
implements
   VolumeFailureSummary volumeFailureSummary,
   boolean requestFullBlockReportLease,
   @Nonnull 

[35/50] [abbrv] hadoop git commit: HDFS-13057: [SPS]: Revisit configurations to make SPS service modes internal/external/none. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13057: [SPS]: Revisit configurations to make SPS service modes 
internal/external/none. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3b83110d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3b83110d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3b83110d

Branch: refs/heads/HDFS-10285
Commit: 3b83110d5ed582b9f913ecf3f62ce410535f8fca
Parents: b0cb8d9
Author: Uma Maheswara Rao G 
Authored: Fri Jan 26 08:57:29 2018 -0800
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:04 2018 -0700

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  39 
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +-
 .../server/blockmanagement/BlockManager.java| 105 +++---
 .../hdfs/server/namenode/FSNamesystem.java  |   6 +-
 .../hdfs/server/namenode/FSTreeTraverser.java   |   2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  34 ++--
 .../sps/BlockStorageMovementNeeded.java |   2 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |   3 +
 .../hdfs/server/namenode/sps/SPSService.java|   4 +-
 .../namenode/sps/StoragePolicySatisfier.java|  17 +-
 .../server/sps/ExternalSPSFileIDCollector.java  |  32 ++-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  16 +-
 .../src/main/resources/hdfs-default.xml |  11 +-
 .../src/site/markdown/ArchivalStorage.md|  17 +-
 .../TestStoragePolicySatisfyWorker.java |   5 +-
 .../hadoop/hdfs/server/mover/TestMover.java |  45 +++--
 .../hdfs/server/mover/TestStorageMover.java |   4 +-
 .../namenode/TestNameNodeReconfigure.java   | 105 +-
 .../TestPersistentStoragePolicySatisfier.java   |   9 +-
 .../TestStoragePolicySatisfierWithHA.java   |  12 +-
 .../sps/TestStoragePolicySatisfier.java | 202 +++
 ...stStoragePolicySatisfierWithStripedFile.java |  17 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 112 +++---
 .../hdfs/tools/TestStoragePolicyCommands.java   |   5 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |  14 +-
 25 files changed, 500 insertions(+), 327 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index aabcdd9..ab48dcd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -129,6 +129,45 @@ public final class HdfsConstants {
   }
 
   /**
+   * Storage policy satisfier service modes.
+   */
+  public enum StoragePolicySatisfierMode {
+
+/**
+ * This mode represents that SPS service is running inside Namenode and can
+ * accept any SPS call request.
+ */
+INTERNAL,
+
+/**
+ * This mode represents that SPS service is running outside Namenode as an
+ * external service and can accept any SPS call request.
+ */
+EXTERNAL,
+
+/**
+ * This mode represents that SPS service is disabled and cannot accept any
+ * SPS call request.
+ */
+NONE;
+
+private static final Map<String, StoragePolicySatisfierMode> MAP =
+new HashMap<>();
+
+static {
+  for (StoragePolicySatisfierMode a : values()) {
+MAP.put(a.name(), a);
+  }
+}
+
+/** Convert the given String to a StoragePolicySatisfierMode. */
+public static StoragePolicySatisfierMode fromString(String s) {
+  return MAP.get(StringUtils.toUpperCase(s));
+}
+  }
+
+
+  /**
* Storage policy satisfy path status.
*/
   public enum StoragePolicySatisfyPathStatus {
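A minimal usage sketch for the new enum, assuming the configuration key introduced in this series has the string value "dfs.storage.policy.satisfier.mode" (the constant DFS_STORAGE_POLICY_SATISFIER_MODE_KEY appears in later patches below):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;

    public class SpsModeExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Run the satisfier as an external service; INTERNAL and NONE are the others.
        conf.set("dfs.storage.policy.satisfier.mode",
            StoragePolicySatisfierMode.EXTERNAL.toString());

        // fromString() is case-insensitive (names are upper-cased into MAP)
        // and returns null for unknown strings, so callers must null-check.
        StoragePolicySatisfierMode mode = StoragePolicySatisfierMode
            .fromString(conf.get("dfs.storage.policy.satisfier.mode", "none"));
        System.out.println("SPS enabled: " + (mode != null
            && mode != StoragePolicySatisfierMode.NONE));
      }
    }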

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index f1a59d3..bf29d14 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import 

[44/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java
deleted file mode 100644
index d6e92d2..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeBlockMoveTaskHandler.java
+++ /dev/null
@@ -1,63 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs.server.namenode.sps;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import 
org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-
-/**
- * This class handles the internal SPS block movements. This will assign block
- * movement tasks to target datanode descriptors.
- */
-@InterfaceAudience.Private
-public class IntraSPSNameNodeBlockMoveTaskHandler
-implements BlockMoveTaskHandler {
-
-  private BlockManager blockManager;
-  private Namesystem namesystem;
-
-  public IntraSPSNameNodeBlockMoveTaskHandler(BlockManager blockManager,
-  Namesystem namesytem) {
-this.blockManager = blockManager;
-this.namesystem = namesytem;
-  }
-
-  @Override
-  public void submitMoveTask(BlockMovingInfo blkMovingInfo) throws IOException 
{
-namesystem.readLock();
-try {
-  DatanodeDescriptor dn = blockManager.getDatanodeManager()
-  .getDatanode(blkMovingInfo.getTarget().getDatanodeUuid());
-  if (dn == null) {
-throw new IOException("Failed to schedule block movement task:"
-+ blkMovingInfo + " as target datanode: "
-+ blkMovingInfo.getTarget() + " doesn't exists");
-  }
-  dn.incrementBlocksScheduled(blkMovingInfo.getTargetStorageType());
-  dn.addBlocksToMoveStorage(blkMovingInfo);
-} finally {
-  namesystem.readUnlock();
-}
-  }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java
deleted file mode 100644
index 2bf4810..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeContext.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.sps;
-
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-
-import java.io.IOException;
-import 

[06/50] [abbrv] hadoop git commit: HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4bcf61c6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4bcf61c6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4bcf61c6

Branch: refs/heads/HDFS-10285
Commit: 4bcf61c696909342f1a238f614d4471c4b6fbad0
Parents: 9e82e5a
Author: Uma Maheswara Rao G 
Authored: Mon Jul 17 10:24:06 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../hdfs/server/blockmanagement/BlockManager.java   |  2 +-
 .../server/datanode/StoragePolicySatisfyWorker.java |  6 +++---
 .../hdfs/server/namenode/StoragePolicySatisfier.java|  6 +++---
 .../hadoop/hdfs/server/protocol/DatanodeProtocol.java   |  5 ++---
 .../org/apache/hadoop/hdfs/server/mover/TestMover.java  |  7 ---
 .../server/namenode/TestStoragePolicySatisfier.java | 12 ++--
 6 files changed, 19 insertions(+), 19 deletions(-)
--
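The warnings fixed below are the usual checkstyle nits: a missing period at the end of a javadoc sentence, and redundant modifiers on nested enums. Both redundant forms are legal Java, which is why they compile but still get flagged:

    public class Example {
      // Flagged: 'static' is redundant; nested enums are implicitly static.
      public static enum Status { OK, FAILED }

      public enum Code {
        SUCCESS(0);
        private final int value;
        // Flagged: enum constructors are implicitly private.
        private Code(int value) { this.value = value; }
      }
    }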


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bcf61c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 988067c..8b7abaa 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -427,7 +427,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   private final BlockIdManager blockIdManager;
 
-  /** For satisfying block storage policies */
+  /** For satisfying block storage policies. */
   private final StoragePolicySatisfier sps;
   private final BlockStorageMovementNeeded storageMovementNeeded =
   new BlockStorageMovementNeeded();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bcf61c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index f4f97dd..196cd58 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -329,7 +329,7 @@ public class StoragePolicySatisfyWorker {
   /**
* Block movement status code.
*/
-  public static enum BlockMovementStatus {
+  public enum BlockMovementStatus {
 /** Success. */
 DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
 /**
@@ -343,7 +343,7 @@ public class StoragePolicySatisfyWorker {
 
 private final int code;
 
-private BlockMovementStatus(int code) {
+BlockMovementStatus(int code) {
   this.code = code;
 }
 
@@ -365,7 +365,7 @@ public class StoragePolicySatisfyWorker {
 private final DatanodeInfo target;
 private final BlockMovementStatus status;
 
-public BlockMovementResult(long trackId, long blockId,
+BlockMovementResult(long trackId, long blockId,
 DatanodeInfo target, BlockMovementStatus status) {
   this.trackId = trackId;
   this.blockId = blockId;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4bcf61c6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 00b4cd0..af3b7f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -714,10 +714,10 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   private static class StorageTypeNodePair {
-public StorageType storageType = null;
-public DatanodeDescriptor dn = null;
+private StorageType storageType = null;
+private 

[05/50] [abbrv] hadoop git commit: HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running together. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running 
together. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5eb24ef7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5eb24ef7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5eb24ef7

Branch: refs/heads/HDFS-10285
Commit: 5eb24ef7e7b8fb61a5f5b88bae3596b30aaeb60b
Parents: 0b360b1
Author: Uma Maheswara Rao G 
Authored: Wed Jul 12 17:56:56 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../server/namenode/StoragePolicySatisfier.java | 53 +++-
 .../namenode/TestStoragePolicySatisfier.java|  3 +-
 ...stStoragePolicySatisfierWithStripedFile.java |  5 +-
 3 files changed, 34 insertions(+), 27 deletions(-)
--
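The mutual exclusion hinges on the Mover's ID file: a running Mover keeps HdfsServerConstants.MOVER_ID_PATH open for write, so the satisfier can detect it by asking the namesystem whether that file is currently open. A sketch with the HDFS internals stubbed out (the isFileOpenedForWrite accessor is an assumption here; the branch may expose the lease check through a different method):

    import java.io.IOException;

    class MoverSpsExclusionSketch {
      /** Stand-in for the Namesystem lease check used by the real code. */
      interface Namesystem {
        boolean isFileOpenedForWrite(String path) throws IOException;
      }

      // Stand-in for HdfsServerConstants.MOVER_ID_PATH.
      private static final String MOVER_ID_PATH = "/system/mover.id";

      private final Namesystem namesystem;
      private volatile boolean isRunning;

      MoverSpsExclusionSketch(Namesystem namesystem) {
        this.namesystem = namesystem;
      }

      synchronized void start() throws IOException {
        isRunning = true;
        if (namesystem.isFileOpenedForWrite(MOVER_ID_PATH)) {
          isRunning = false; // a live Mover holds the ID file open: back off
          return;
        }
        // ... launch the satisfier thread ...
      }
    }

Moving this check from run() into start(), as the diff below does, means a concurrent Mover is rejected before the satisfier thread ever spins up, rather than after.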


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5eb24ef7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 97cbf1b..00b4cd0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -128,6 +128,14 @@ public class StoragePolicySatisfier implements Runnable {
*/
   public synchronized void start(boolean reconfigStart) {
 isRunning = true;
+if (checkIfMoverRunning()) {
+  isRunning = false;
+  LOG.error(
+  "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
+  + HdfsServerConstants.MOVER_ID_PATH.toString()
+  + " been opened. Maybe a Mover instance is running!");
+  return;
+}
 if (reconfigStart) {
   LOG.info("Starting StoragePolicySatisfier, as admin requested to "
   + "activate it.");
@@ -211,20 +219,6 @@ public class StoragePolicySatisfier implements Runnable {
 
   @Override
   public void run() {
-boolean isMoverRunning = !checkIfMoverRunning();
-synchronized (this) {
-  isRunning = isMoverRunning;
-  if (!isRunning) {
-// Stopping monitor thread and clearing queues as well
-this.clearQueues();
-this.storageMovementsMonitor.stopGracefully();
-LOG.error(
-"Stopping StoragePolicySatisfier thread " + "as Mover ID file "
-+ HdfsServerConstants.MOVER_ID_PATH.toString()
-+ " been opened. Maybe a Mover instance is running!");
-return;
-  }
-}
 while (namesystem.isRunning() && isRunning) {
   try {
 if (!namesystem.isInSafeMode()) {
@@ -274,25 +268,34 @@ public class StoragePolicySatisfier implements Runnable {
 // we want to check block movements.
 Thread.sleep(3000);
   } catch (Throwable t) {
-synchronized (this) {
+handleException(t);
+  }
+}
+  }
+
+  private void handleException(Throwable t) {
+// double check to avoid entering into synchronized block.
+if (isRunning) {
+  synchronized (this) {
+if (isRunning) {
   isRunning = false;
   // Stopping monitor thread and clearing queues as well
   this.clearQueues();
   this.storageMovementsMonitor.stopGracefully();
-}
-if (!namesystem.isRunning()) {
-  LOG.info("Stopping StoragePolicySatisfier.");
-  if (!(t instanceof InterruptedException)) {
-LOG.info("StoragePolicySatisfier received an exception"
-+ " while shutting down.", t);
+  if (!namesystem.isRunning()) {
+LOG.info("Stopping StoragePolicySatisfier.");
+if (!(t instanceof InterruptedException)) {
+  LOG.info("StoragePolicySatisfier received an exception"
+  + " while shutting down.", t);
+}
+return;
   }
-  break;
 }
-LOG.error("StoragePolicySatisfier thread received runtime exception. "
-+ "Stopping Storage policy satisfier work", t);
-break;
   }
 }
+LOG.error("StoragePolicySatisfier thread received runtime exception. "
++ "Stopping Storage policy satisfier work", t);
+return;
   }
 
   private BlocksMovingAnalysisStatus 
analyseBlocksStorageMovementsAndAssignToDN(


[18/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
new file mode 100644
index 000..8dc52dc
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -0,0 +1,1779 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that StoragePolicySatisfier daemon is able to check the blocks to be
+ * moved and finding its suggested target locations to move.
+ */
+public class TestStoragePolicySatisfier {
+
+  {
+GenericTestUtils.setLogLevel(
+getLogger(FSTreeTraverser.class), Level.DEBUG);
+  }
+
+  private static final String ONE_SSD = "ONE_SSD";
+  private static final String COLD = "COLD";
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
+  private final Configuration config = new HdfsConfiguration();
+  private StorageType[][] allDiskTypes =
+ 

[42/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 18acb50..d9a93fd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -32,34 +32,57 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_ADDRESS_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_KERBEROS_PRINCIPAL_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_KEYTAB_FILE_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SPS_MAX_OUTSTANDING_PATHS_KEY;
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY;
 import static 
org.apache.hadoop.hdfs.client.HdfsClientConfigKeys.DFS_DATA_TRANSFER_PROTECTION_KEY;
+import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.fail;
 
 import java.io.File;
+import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.net.URI;
+import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
-import java.util.Collection;
 import java.util.List;
 import java.util.Properties;
 import java.util.concurrent.TimeoutException;
 
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.sps.BlockMovementListener;
 import 
org.apache.hadoop.hdfs.server.namenode.sps.BlockStorageMovementAttemptedItems;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
-import org.apache.hadoop.hdfs.server.namenode.sps.TestStoragePolicySatisfier;
 import org.apache.hadoop.http.HttpConfig;
 import org.apache.hadoop.minikdc.MiniKdc;
 import org.apache.hadoop.security.SecurityUtil;
@@ -67,29 +90,57 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authentication.util.KerberosName;
 import org.apache.hadoop.security.ssl.KeyStoreTestUtil;
 import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Assert;
+import org.junit.Before;
 import org.junit.Ignore;
 import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 import com.google.common.base.Supplier;
 
 /**
  * Tests the external sps service plugins.
  */
-public class TestExternalStoragePolicySatisfier
-extends TestStoragePolicySatisfier {
+public class TestExternalStoragePolicySatisfier {
+  private static final String ONE_SSD = "ONE_SSD";
+  private static final String COLD = "COLD";
   private StorageType[][] allDiskTypes =
  

[49/50] [abbrv] hadoop git commit: HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier file path. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier 
file path. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66e8f9b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66e8f9b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66e8f9b3

Branch: refs/heads/HDFS-10285
Commit: 66e8f9b31529226309c924226a53dead3e6fcf11
Parents: 2acc50b
Author: Uma Maheswara Rao G 
Authored: Mon Jul 2 17:22:00 2018 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:06 2018 -0700

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  2 +-
 .../NamenodeProtocolTranslatorPB.java   |  2 +-
 .../server/blockmanagement/BlockManager.java|  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  9 ---
 .../sps/BlockStorageMovementAttemptedItems.java | 72 +++--
 .../sps/BlockStorageMovementNeeded.java | 61 ++
 .../hdfs/server/namenode/sps/Context.java   | 45 ---
 .../namenode/sps/DatanodeCacheManager.java  |  4 +-
 .../hdfs/server/namenode/sps/FileCollector.java | 13 +--
 .../namenode/sps/IntraSPSNameNodeContext.java   | 54 +
 .../sps/IntraSPSNameNodeFileIdCollector.java| 14 ++--
 .../hdfs/server/namenode/sps/ItemInfo.java  | 34 
 .../hdfs/server/namenode/sps/SPSService.java| 31 +++
 .../namenode/sps/StoragePolicySatisfier.java| 61 +-
 .../sps/StoragePolicySatisfyManager.java| 20 ++---
 .../hdfs/server/protocol/NamenodeProtocol.java  |  2 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|  4 +-
 .../hdfs/server/sps/ExternalSPSContext.java | 85 
 .../sps/ExternalSPSFilePathCollector.java   | 36 +
 .../sps/ExternalStoragePolicySatisfier.java | 30 +--
 .../src/main/proto/NamenodeProtocol.proto   |  2 +-
 .../TestBlockStorageMovementAttemptedItems.java | 16 ++--
 .../sps/TestStoragePolicySatisfier.java | 66 +--
 ...stStoragePolicySatisfierWithStripedFile.java | 41 --
 .../sps/TestExternalStoragePolicySatisfier.java | 35 +++-
 27 files changed, 346 insertions(+), 414 deletions(-)
--
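The simplification: the satisfier now tracks plain inode ids and turns them into client-usable paths only at the boundary. An HDFS path of the form /.reserved/.inodes/<id> resolves by inode id, which is what DFSUtilClient#makePathFromFileId() produces; a rough equivalent as a sketch:

    import org.apache.hadoop.fs.Path;

    public class FileIdPathExample {
      public static void main(String[] args) {
        long fileId = 16386L; // hypothetical inode id handed out by the namenode
        // Roughly what DFSUtilClient.makePathFromFileId(fileId) returns:
        Path byId = new Path("/.reserved/.inodes/" + fileId);
        System.out.println(byId); // usable with ordinary FileSystem calls
      }
    }

Because the id is stable across renames, the external satisfier no longer needs to carry (and keep consistent) full path strings — hence Long replacing String in getNextSPSPath() below.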


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66e8f9b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index e4283c6..d9367fb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -265,7 +265,7 @@ public class NamenodeProtocolServerSideTranslatorPB 
implements
   RpcController controller, GetNextSPSPathRequestProto request)
   throws ServiceException {
 try {
-  String nextSPSPath = impl.getNextSPSPath();
+  Long nextSPSPath = impl.getNextSPSPath();
   if (nextSPSPath == null) {
 return GetNextSPSPathResponseProto.newBuilder().build();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66e8f9b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
index 97dee9b..3bd5986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
@@ -267,7 +267,7 @@ public class NamenodeProtocolTranslatorPB implements 
NamenodeProtocol,
   }
 
   @Override
-  public String getNextSPSPath() throws IOException {
+  public Long getNextSPSPath() throws IOException {
 GetNextSPSPathRequestProto req =
 GetNextSPSPathRequestProto.newBuilder().build();
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66e8f9b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

[12/50] [abbrv] hadoop git commit: HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy storage policy of all the files under the given dir. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy 
storage policy of all the files under the given dir. Contributed by Surendra 
Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bfd3f8bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bfd3f8bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bfd3f8bd

Branch: refs/heads/HDFS-10285
Commit: bfd3f8bd8a9ae2186ec3e4addc71f912ec7b8923
Parents: 7ea24fc
Author: Uma Maheswara Rao G 
Authored: Sat Sep 30 06:31:52 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:01 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  22 +-
 .../BlockStorageMovementAttemptedItems.java |   8 +-
 .../namenode/BlockStorageMovementNeeded.java| 277 +++---
 .../server/namenode/ReencryptionHandler.java|   1 +
 .../server/namenode/StoragePolicySatisfier.java |  43 ++-
 .../src/main/resources/hdfs-default.xml |  23 ++
 .../src/site/markdown/ArchivalStorage.md|   3 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../TestPersistentStoragePolicySatisfier.java   |   8 +-
 .../namenode/TestStoragePolicySatisfier.java| 377 ++-
 11 files changed, 689 insertions(+), 83 deletions(-)
--
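Two knobs drive the new traversal throttling: the queue limit caps how many pending items the recursive directory scan may buffer before it pauses, and the work multiplier bounds how many block moves get scheduled per heartbeat iteration. Both are plain int settings; key strings are taken from the diff below, and the values shown are the defaults it introduces:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    public class SpsThrottleExample {
      public static void main(String[] args) {
        Configuration conf = new HdfsConfiguration();
        // Traversal waits once this many items are queued (default 1000).
        conf.setInt("dfs.storage.policy.satisfier.queue.limit", 1000);
        // Moves scheduled per iteration, relative to heartbeat work (default 1).
        conf.setInt(
            "dfs.storage.policy.satisfier.work.multiplier.per.iteration", 1);
      }
    }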


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd3f8bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index e66806f..c90ca33 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -618,6 +618,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys 
{
   "dfs.storage.policy.satisfier.enabled";
   public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
   false;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY =
+  "dfs.storage.policy.satisfier.queue.limit";
+  public static final int  DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_DEFAULT =
+  1000;
+  public static final String DFS_SPS_WORK_MULTIPLIER_PER_ITERATION =
+  "dfs.storage.policy.satisfier.work.multiplier.per.iteration";
+  public static final int DFS_SPS_WORK_MULTIPLIER_PER_ITERATION_DEFAULT =
+  1;
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd3f8bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index f5ceeaf..c26599c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1457,7 +1457,27 @@ public class DFSUtil {
 "It should be a positive, non-zero integer value.");
 return blocksReplWorkMultiplier;
   }
-  
+
+  /**
+   * Get DFS_SPS_WORK_MULTIPLIER_PER_ITERATION from
+   * configuration.
+   *
+   * @param conf Configuration
+   * @return Value of DFS_SPS_WORK_MULTIPLIER_PER_ITERATION
+   */
+  public static int getSPSWorkMultiplier(Configuration conf) {
+int spsWorkMultiplier = conf
+.getInt(
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION,
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
+Preconditions.checkArgument(
+(spsWorkMultiplier > 0),
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION +
+" = '" + spsWorkMultiplier + "' is invalid. " +
+"It should be a positive, non-zero integer value.");
+return spsWorkMultiplier;
+  }
+
   /**
* Get SPNEGO keytab Key from configuration
* 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bfd3f8bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 

[20/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
deleted file mode 100644
index 9f733ff..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ /dev/null
@@ -1,1775 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests that StoragePolicySatisfier daemon is able to check the blocks to be
- * moved and finding its suggested target locations to move.
- */
-public class TestStoragePolicySatisfier {
-
-  {
-GenericTestUtils.setLogLevel(
-getLogger(FSTreeTraverser.class), Level.DEBUG);
-  }
-
-  private static final String ONE_SSD = "ONE_SSD";
-  private static final String COLD = "COLD";
-  private static final Logger LOG =
-  LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
-  private final Configuration config = new HdfsConfiguration();
-  private StorageType[][] allDiskTypes =
-  new StorageType[][]{{StorageType.DISK, StorageType.DISK},
-  {StorageType.DISK, StorageType.DISK},
-  {StorageType.DISK, StorageType.DISK}};
-  private MiniDFSCluster hdfsCluster = null;
-  final private int numOfDatanodes = 3;
-  

[40/50] [abbrv] hadoop git commit: HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by external satisfier. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8467ec24/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
deleted file mode 100644
index ff277ba..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
+++ /dev/null
@@ -1,174 +0,0 @@
-package org.apache.hadoop.hdfs.server.sps;
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.sps.Context;
-import org.apache.hadoop.hdfs.server.namenode.sps.FileIdCollector;
-import org.apache.hadoop.hdfs.server.namenode.sps.ItemInfo;
-import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is to scan the paths recursively. If file is directory, then it
- * will scan for files recursively. If the file is non directory, then it will
- * just submit the same file to process.
- */
-@InterfaceAudience.Private
-public class ExternalSPSFileIDCollector implements FileIdCollector {
-  public static final Logger LOG =
-  LoggerFactory.getLogger(ExternalSPSFileIDCollector.class);
-  private Context cxt;
-  private DistributedFileSystem dfs;
-  private SPSService service;
-  private int maxQueueLimitToScan;
-
-  public ExternalSPSFileIDCollector(Context cxt, SPSService service) {
-this.cxt = cxt;
-this.service = service;
-this.maxQueueLimitToScan = service.getConf().getInt(
-DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY,
-DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_DEFAULT);
-try {
-  // TODO: probably we could get this dfs from external context? but this 
is
-  // too specific to external.
-  dfs = getFS(service.getConf());
-} catch (IOException e) {
-  LOG.error("Unable to get the filesystem. Make sure Namenode running and "
-  + "configured namenode address is correct.", e);
-}
-  }
-
-  private DistributedFileSystem getFS(Configuration conf) throws IOException {
-return (DistributedFileSystem) FileSystem
-.get(FileSystem.getDefaultUri(conf), conf);
-  }
-
-  /**
-   * Recursively scan the given path and add the file info to SPS service for
-   * processing.
-   */
-  private long processPath(long startID, String fullPath) {
-long pendingWorkCount = 0; // to be satisfied file counter
-for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;;) {
-  final DirectoryListing children;
-  try {
-children = dfs.getClient().listPaths(fullPath, lastReturnedName, 
false);
-  } catch (IOException e) {
-LOG.warn("Failed to list directory " + fullPath
-+ ". Ignore the directory and continue.", e);
-return pendingWorkCount;
-  }
-  if (children == null) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("The scanning start dir/sub dir " + fullPath
-  + " does not have childrens.");
-}
-return pendingWorkCount;
-  }
-
-  for (HdfsFileStatus child : children.getPartialListing()) {
-if (child.isFile()) {
-  service.addFileIdToProcess(new ItemInfo(startID, child.getFileId()),
-  false);
-  checkProcessingQueuesFree();
-  pendingWorkCount++; // increment to be satisfied file count
-} else {
-  String fullPathStr = child.getFullName(fullPath);
- 
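Stripped of the SPS bookkeeping, the deleted collector's traversal loop is the standard DFSClient#listPaths paging idiom: pass the last returned name back in as a cursor until hasMore() is false. A self-contained sketch of just that idiom:

    import java.io.IOException;

    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DirectoryListing;
    import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;

    public class ListPathsIdiom {
      /** Page through one directory, visiting each child exactly once. */
      static void listAll(DistributedFileSystem dfs, String dir)
          throws IOException {
        byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME; // start cursor
        while (true) {
          DirectoryListing children =
              dfs.getClient().listPaths(dir, lastReturnedName, false);
          if (children == null) {
            return; // directory vanished or is empty
          }
          for (HdfsFileStatus child : children.getPartialListing()) {
            System.out.println(child.getLocalName());
          }
          if (!children.hasMore()) {
            return; // last page reached
          }
          lastReturnedName = children.getLastName(); // advance the cursor
        }
      }
    }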

[34/50] [abbrv] hadoop git commit: HDFS-13057: [SPS]: Revisit configurations to make SPS service modes internal/external/none. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
index 0e3a5a3..2257608 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -103,8 +104,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -216,8 +217,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -328,8 +329,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 conf.set(DFSConfigKeys
 .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
 "3000");
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -420,8 +421,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3b83110d/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 9a401bd..42b04da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.sps.BlockMovementListener;
@@ -54,12 +55,19 @@ public class TestExternalStoragePolicySatisfier
   new StorageType[][]{{StorageType.DISK, StorageType.DISK},
   {StorageType.DISK, StorageType.DISK},
   {StorageType.DISK, StorageType.DISK}};
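
The recurring change in this patch swaps the old boolean enable key for a mode enum. A minimal sketch of selecting a mode under those assumptions (keys and enum values as shown in the hunks above):

  Configuration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
      StoragePolicySatisfierMode.INTERNAL.toString());
  // EXTERNAL runs the satisfier as a separate process; NONE disables it.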

[47/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Resolve conflicts after rebasing HDFS-10285 branch to trunk. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13076: [SPS]: Resolve conflicts after rebasing HDFS-10285 branch to trunk. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db3f227d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db3f227d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db3f227d

Branch: refs/heads/HDFS-10285
Commit: db3f227d8aeeea8b5bb473fed9ca4f6a17b0fca5
Parents: 66e8f9b
Author: Rakesh Radhakrishnan 
Authored: Thu Jul 5 10:10:13 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:06 2018 -0700

--
 .../hdfs/server/federation/router/RouterNamenodeProtocol.java | 6 ++
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java | 7 +++
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java  | 2 +-
 .../server/namenode/sps/IntraSPSNameNodeFileIdCollector.java  | 4 ++--
 4 files changed, 16 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db3f227d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
index 0433650..edfb391 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
@@ -184,4 +184,10 @@ public class RouterNamenodeProtocol implements NamenodeProtocol {
 rpcServer.checkOperation(OperationCategory.READ, false);
 return false;
   }
+
+  @Override
+  public Long getNextSPSPath() throws IOException {
+// not supported
+return null;
+  }
 }
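
For context, an external satisfier polls this RPC for the next path to work on; a hedged sketch of tolerant caller-side handling ('nn' as a NamenodeProtocol proxy is an assumption, only getNextSPSPath() itself comes from this patch):

  Long pathId = nn.getNextSPSPath();
  if (pathId == null) {
    // The federation Router answers null: SPS path tracking is not supported there.
    return;
  }
  // hand pathId to the satisfier's queue (elided)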

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db3f227d/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index d93f99d..36645c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2509,4 +2509,11 @@ public class RouterRpcServer extends AbstractService
 checkOperation(OperationCategory.READ, false);
 return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
   }
+
+  @Override
+  public Long getNextSPSPath() throws IOException {
+checkOperation(OperationCategory.READ, false);
+// not supported
+return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db3f227d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bae6b4e..bb63f2a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -5078,7 +5078,7 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
 String modeVal = spsMode;
-if (org.apache.commons.lang.StringUtils.isBlank(modeVal)) {
+if (org.apache.commons.lang3.StringUtils.isBlank(modeVal)) {
   modeVal = conf.get(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
   DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db3f227d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeFileIdCollector.java
--
diff --git 

[31/50] [abbrv] hadoop git commit: HDFS-13077. [SPS]: Fix review comments of external storage policy satisfier. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13077. [SPS]: Fix review comments of external storage policy satisfier. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d3de4fb2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d3de4fb2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d3de4fb2

Branch: refs/heads/HDFS-10285
Commit: d3de4fb2a084cbadab8ef91f11aa7732d3b0f308
Parents: 5845c36
Author: Surendra Singh Lilhore 
Authored: Mon Jan 29 23:59:55 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:04 2018 -0700

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +-
 .../server/blockmanagement/BlockManager.java|  33 +++-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  15 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  41 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  11 ++
 .../hdfs/server/namenode/sps/SPSPathIds.java|   8 +-
 .../namenode/sps/StoragePolicySatisfier.java|   6 +-
 .../hdfs/server/sps/ExternalSPSContext.java |   4 +
 .../sps/ExternalStoragePolicySatisfier.java |  30 ++-
 .../sps/TestStoragePolicySatisfier.java |   7 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 195 ++-
 11 files changed, 323 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3de4fb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index bf29d14..b354d64 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -614,7 +614,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String  DFS_MOVER_MAX_NO_MOVE_INTERVAL_KEY = "dfs.mover.max-no-move-interval";
   public static final int     DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
 
-  // SPS related configurations
+  // StoragePolicySatisfier (SPS) related configurations
   public static final String  DFS_STORAGE_POLICY_SATISFIER_MODE_KEY =
   "dfs.storage.policy.satisfier.mode";
   public static final String DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT =
@@ -643,6 +643,18 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.storage.policy.satisfier.low.max-streams.preference";
   public static final boolean 
DFS_STORAGE_POLICY_SATISFIER_LOW_MAX_STREAMS_PREFERENCE_DEFAULT =
   true;
+  public static final String DFS_SPS_MAX_OUTSTANDING_PATHS_KEY =
+  "dfs.storage.policy.satisfier.max.outstanding.paths";
+  public static final int DFS_SPS_MAX_OUTSTANDING_PATHS_DEFAULT = 1;
+
+  // SPS keytab configurations, by default it is disabled.
+  public static final String  DFS_SPS_ADDRESS_KEY =
+  "dfs.storage.policy.satisfier.address";
+  public static final String  DFS_SPS_ADDRESS_DEFAULT= "0.0.0.0:0";
+  public static final String  DFS_SPS_KEYTAB_FILE_KEY =
+  "dfs.storage.policy.satisfier.keytab.file";
+  public static final String  DFS_SPS_KERBEROS_PRINCIPAL_KEY =
+  "dfs.storage.policy.satisfier.kerberos.principal";
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;
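
A hedged sketch of wiring the new external-SPS keys into a client Configuration; every value below is illustrative, not a default from this patch:

  Configuration conf = new HdfsConfiguration();
  conf.setInt(DFSConfigKeys.DFS_SPS_MAX_OUTSTANDING_PATHS_KEY, 10000); // illustrative limit
  conf.set(DFSConfigKeys.DFS_SPS_ADDRESS_KEY, "0.0.0.0:0");
  conf.set(DFSConfigKeys.DFS_SPS_KEYTAB_FILE_KEY,
      "/etc/security/keytabs/sps.service.keytab"); // hypothetical keytab path
  conf.set(DFSConfigKeys.DFS_SPS_KERBEROS_PRINCIPAL_KEY,
      "sps/_HOST@EXAMPLE.COM"); // hypothetical principal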

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d3de4fb2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 4ea64a3..9205910 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -439,6 +439,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final boolean storagePolicyEnabled;
   private StoragePolicySatisfierMode spsMode;
   private SPSPathIds spsPaths;
+  private final int spsOutstandingPathsLimit;
 
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
@@ -478,14 +479,16 @@ public class BlockManager implements BlockStatsMXBean {
 

[50/50] [abbrv] hadoop git commit: HDFS-13808: [SPS]: Remove unwanted FSNamesystem #isFileOpenedForWrite() and #getFileInfo() function. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13808: [SPS]: Remove unwanted FSNamesystem #isFileOpenedForWrite() and #getFileInfo() function. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3ac07b72
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3ac07b72
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3ac07b72

Branch: refs/heads/HDFS-10285
Commit: 3ac07b720b7839a7fe6c83f4ccfe319b6a892501
Parents: 39ed3a6
Author: Uma Maheswara Rao Gangumalla 
Authored: Sat Aug 11 23:22:59 2018 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:07 2018 -0700

--
 .../router/RouterNamenodeProtocol.java  |  1 +
 .../server/blockmanagement/BlockManager.java| 34 
 .../blockmanagement/DatanodeDescriptor.java |  2 +-
 .../server/blockmanagement/DatanodeManager.java | 17 --
 .../hdfs/server/datanode/BPServiceActor.java| 16 --
 .../hdfs/server/namenode/FSNamesystem.java  | 38 -
 .../hadoop/hdfs/server/namenode/Namesystem.java | 22 
 .../sps/BlockStorageMovementNeeded.java | 18 ++-
 .../hdfs/server/namenode/sps/Context.java   | 28 --
 .../hdfs/server/namenode/sps/SPSService.java|  5 +-
 .../namenode/sps/StoragePolicySatisfier.java| 19 +++
 .../hdfs/server/sps/ExternalSPSContext.java | 57 +---
 .../sps/ExternalStoragePolicySatisfier.java |  2 +-
 .../src/site/markdown/ArchivalStorage.md|  2 +-
 .../TestPersistentStoragePolicySatisfier.java   | 10 +++-
 ...stStoragePolicySatisfierWithStripedFile.java |  2 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  4 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |  2 +-
 18 files changed, 39 insertions(+), 240 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
index edfb391..bf0db6e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
@@ -187,6 +187,7 @@ public class RouterNamenodeProtocol implements NamenodeProtocol {
 
   @Override
   public Long getNextSPSPath() throws IOException {
+rpcServer.checkOperation(OperationCategory.READ, false);
 // not supported
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3ac07b72/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 87bd155..d8a3aa3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4300,21 +4300,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Check file has low redundancy blocks.
-   */
-  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
-boolean result = false;
-for (BlockInfo block : bc.getBlocks()) {
-  short expected = getExpectedRedundancyNum(block);
-  final NumberReplicas n = countNodes(block);
-  if (expected > n.liveReplicas()) {
-result = true;
-  }
-}
-return result;
-  }
-
-  /**
* Check sufficient redundancy of the blocks in the collection. If any block
* is needed reconstruction, insert it into the reconstruction queue.
* Otherwise, if the block is more than the expected replication factor,
@@ -5011,25 +4996,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Check whether file id has low redundancy blocks.
-   *
-   * @param inodeID
-   *  - inode id
-   */
-  public boolean hasLowRedundancyBlocks(long inodeID) {
-namesystem.readLock();
-try {
-  BlockCollection bc = namesystem.getBlockCollection(inodeID);
-  if (bc == null) {
-return false;
-  }
-  return hasLowRedundancyBlocks(bc);
-} finally {

[23/50] [abbrv] hadoop git commit: HDFS-12982 : [SPS]: Reduce the locking and cleanup the Namesystem access. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-12982 : [SPS]: Reduce the locking and cleanup the Namesystem access. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05d4daf6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05d4daf6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05d4daf6

Branch: refs/heads/HDFS-10285
Commit: 05d4daf6ba3e5bd40f46e8003ee12fc7c613453d
Parents: 7842071
Author: Surendra Singh Lilhore 
Authored: Mon Jan 8 15:13:11 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:02 2018 -0700

--
 .../server/blockmanagement/BlockManager.java|  16 +-
 .../blockmanagement/DatanodeDescriptor.java |   2 +-
 .../server/blockmanagement/DatanodeManager.java |  22 ++
 .../server/namenode/FSDirStatAndListingOp.java  |   1 +
 .../hdfs/server/namenode/FSNamesystem.java  |  44 ++-
 .../hdfs/server/namenode/IntraNNSPSContext.java |  41 --
 .../hadoop/hdfs/server/namenode/Namesystem.java |  24 ++
 .../sps/BlockStorageMovementAttemptedItems.java |  17 +-
 .../sps/BlockStorageMovementNeeded.java |  48 ++-
 .../hdfs/server/namenode/sps/Context.java   | 181 +
 .../namenode/sps/IntraSPSNameNodeContext.java   | 220 +++
 .../namenode/sps/StoragePolicySatisfier.java| 374 +--
 .../TestBlockStorageMovementAttemptedItems.java |  17 +-
 .../sps/TestStoragePolicySatisfier.java |  25 +-
 14 files changed, 742 insertions(+), 290 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05d4daf6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ec99a9f..5ee4026 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,11 +89,12 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
-import org.apache.hadoop.hdfs.server.namenode.IntraNNSPSContext;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.sps.Context;
+import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -433,6 +434,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
+  private Context spsctxt = null;
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -479,8 +481,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-StoragePolicySatisfier.Context spsctxt = new IntraNNSPSContext(namesystem);
-sps = new StoragePolicySatisfier(namesystem, this, conf, spsctxt);
+spsctxt = new IntraSPSNameNodeContext(namesystem, this, conf);
+sps = new StoragePolicySatisfier(spsctxt);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5039,8 +5041,8 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-
-sps.start(false);
+// TODO: FSDirectory will get removed via HDFS-12911 modularization work
+sps.start(false, namesystem.getFSDirectory());
   }
 
   /**
@@ -5076,8 +5078,8 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-
-sps.start(true);
+// TODO: FSDirectory will get removed via HDFS-12911 modularization work
+sps.start(true, 

[32/50] [abbrv] hadoop git commit: HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by Surendra Singh.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/4402f3f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 85a101f..47ea39f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -252,8 +252,8 @@ public class TestNameNodeReconfigure {
 // Since DFS_STORAGE_POLICY_ENABLED_KEY is disabled, SPS can't be enabled.
 assertEquals("SPS shouldn't start as "
 + DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled", false,
-nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.INTERNAL, false);
 
@@ -280,8 +280,8 @@ public class TestNameNodeReconfigure {
   fail("ReconfigurationException expected");
 } catch (ReconfigurationException e) {
   GenericTestUtils.assertExceptionContains(
-  "For enabling or disabling storage policy satisfier, we must "
-  + "pass either none/internal/external string value only",
+  "For enabling or disabling storage policy satisfier, must "
+  + "pass either internal/external/none string value only",
   e.getCause());
 }
 
@@ -301,8 +301,8 @@ public class TestNameNodeReconfigure {
 nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.EXTERNAL.toString());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-false, nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+false, nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
 StoragePolicySatisfierMode.EXTERNAL.toString(),
 nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -342,8 +342,8 @@ public class TestNameNodeReconfigure {
 nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.INTERNAL.toString());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-true, nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+true, nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
 StoragePolicySatisfierMode.INTERNAL.toString(),
 nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -353,7 +353,8 @@ public class TestNameNodeReconfigure {
   void verifySPSEnabled(final NameNode nameNode, String property,
   StoragePolicySatisfierMode expected, boolean isSatisfierRunning) {
 assertEquals(property + " has wrong value", isSatisfierRunning, nameNode
-.getNamesystem().getBlockManager().isStoragePolicySatisfierRunning());
+.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 String actual = nameNode.getConf().get(property,
 DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
 assertEquals(property + " has wrong value", expected,
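
Condensed, the runtime toggle this test exercises looks like the following ('nameNode' is the test's NameNode handle, an assumption here):

  nameNode.reconfigureProperty(
      DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
      StoragePolicySatisfierMode.EXTERNAL.toString());
  // Any value other than internal/external/none fails with
  // ReconfigurationException, as the assertion above checks.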

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4402f3f8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index b84214c..9f98777 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -389,7 +389,8 @@ public class TestPersistentStoragePolicySatisfier {
   fs.setStoragePolicy(testFile, ONE_SSD);
   fs.satisfyStoragePolicy(testFile);
 
-  cluster.getNamesystem().getBlockManager().disableSPS();
+  

[26/50] [abbrv] hadoop git commit: HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. Contributed by Uma Maheswara Rao G.

2018-08-12 Thread umamahesh
HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. Contributed by Uma Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3159b39c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3159b39c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3159b39c

Branch: refs/heads/HDFS-10285
Commit: 3159b39cf8ef704835325263154fb1a1cecc109d
Parents: 8d4f74e
Author: Rakesh Radhakrishnan 
Authored: Tue Jan 23 20:09:26 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:03 2018 -0700

--
 .../sps/BlockStorageMovementNeeded.java |  70 +++-
 .../hdfs/server/namenode/sps/Context.java   |   8 +
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   2 +
 .../namenode/sps/IntraSPSNameNodeContext.java   |   7 +
 .../sps/IntraSPSNameNodeFileIdCollector.java|   6 +-
 .../hdfs/server/namenode/sps/SPSService.java|  10 +-
 .../namenode/sps/StoragePolicySatisfier.java|   8 +-
 .../server/sps/ExternalSPSFileIDCollector.java  | 156 +
 .../hadoop/hdfs/server/sps/package-info.java|  28 ++
 .../sps/TestStoragePolicySatisfier.java | 323 ++-
 .../sps/TestExternalStoragePolicySatisfier.java | 108 +++
 11 files changed, 556 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3159b39c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index 39a0051..b141502 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -97,23 +97,53 @@ public class BlockStorageMovementNeeded {
   }
 
   /**
-   * Add the itemInfo to tracking list for which storage movement
-   * expected if necessary.
+   * Add the itemInfo list to tracking list for which storage movement expected
+   * if necessary.
+   *
* @param startId
-   *- start id
+   *  - start id
* @param itemInfoList
-   *- List of child in the directory
+   *  - List of children in the directory
+   * @param scanCompleted
+   *  - Indicates whether the start id directory has no more elements to
+   *  scan.
*/
   @VisibleForTesting
-  public synchronized void addAll(long startId,
-  List itemInfoList, boolean scanCompleted) {
+  public synchronized void addAll(long startId, List itemInfoList,
+  boolean scanCompleted) {
 storageMovementNeeded.addAll(itemInfoList);
+updatePendingDirScanStats(startId, itemInfoList.size(), scanCompleted);
+  }
+
+  /**
+   * Add the itemInfo to tracking list for which storage movement expected if
+   * necessary.
+   *
+   * @param itemInfo
+   *  - child item in the directory
+   * @param scanCompleted
+   *  - Indicates whether the ItemInfo start id directory has no more
+   *  elements to scan.
+   */
+  @VisibleForTesting
+  public synchronized void add(ItemInfo itemInfo, boolean scanCompleted) {
+storageMovementNeeded.add(itemInfo);
+// This represents sps start id is file, so no need to update pending dir
+// stats.
+if (itemInfo.getStartId() == itemInfo.getFileId()) {
+  return;
+}
+updatePendingDirScanStats(itemInfo.getStartId(), 1, scanCompleted);
+  }
+
+  private void updatePendingDirScanStats(long startId, int numScannedFiles,
+  boolean scanCompleted) {
 DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
 if (pendingWork == null) {
   pendingWork = new DirPendingWorkInfo();
   pendingWorkForDirectory.put(startId, pendingWork);
 }
-pendingWork.addPendingWorkCount(itemInfoList.size());
+pendingWork.addPendingWorkCount(numScannedFiles);
 if (scanCompleted) {
   pendingWork.markScanCompleted();
 }
@@ -250,13 +280,15 @@ public class BlockStorageMovementNeeded {
 
 @Override
 public void run() {
-  LOG.info("Starting FileInodeIdCollector!.");
+  LOG.info("Starting SPSPathIdProcessor!.");
   long lastStatusCleanTime = 0;
+  Long startINodeId = null;
   while (ctxt.isRunning()) {
-LOG.info("Running FileInodeIdCollector!.");
 try {
   if (!ctxt.isInSafeMode()) {
-Long startINodeId = ctxt.getNextSPSPathId();
+if 
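
The add()/addAll() contract introduced above distinguishes a directly submitted file (startId == fileId, no directory stats touched) from a child discovered during a directory scan. A hedged sketch of feeding the queue under that contract, assuming the single-argument constructor and a Context 'ctxt':

  BlockStorageMovementNeeded queue = new BlockStorageMovementNeeded(ctxt);
  // file submitted on its own: startId == fileId, so dir-scan stats are skipped
  queue.add(new ItemInfo(fileId, fileId), true);
  // child found while scanning directory dirId; scanCompleted=false keeps the
  // pending-work counter open until the scan finishes
  queue.add(new ItemInfo(dirId, childFileId), false);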

[43/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Cleanup work for HDFS-10285 merge. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
index f85769f..f48521b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestBlockStorageMovementAttemptedItems.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.StorageTypeNodePair;
+import org.apache.hadoop.hdfs.server.sps.ExternalSPSContext;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -52,7 +53,7 @@ public class TestBlockStorageMovementAttemptedItems {
   @Before
   public void setup() throws Exception {
 Configuration config = new HdfsConfiguration();
-Context ctxt = Mockito.mock(IntraSPSNameNodeContext.class);
+Context ctxt = Mockito.mock(ExternalSPSContext.class);
 SPSService sps = new StoragePolicySatisfier(config);
 Mockito.when(ctxt.isRunning()).thenReturn(true);
 Mockito.when(ctxt.isInSafeMode()).thenReturn(false);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/39ed3a66/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
deleted file mode 100644
index ec5307b..000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ /dev/null
@@ -1,1825 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode.sps;
-
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY;
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.Block;
-import 

[48/50] [abbrv] hadoop git commit: HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier file path. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/66e8f9b3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
index b05717a..ec5307b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -108,8 +108,6 @@ public class TestStoragePolicySatisfier {
   public static final long CAPACITY = 2 * 256 * 1024 * 1024;
   public static final String FILE = "/testMoveToSatisfyStoragePolicy";
   public static final int DEFAULT_BLOCK_SIZE = 1024;
-  private ExternalBlockMovementListener blkMoveListener =
-  new ExternalBlockMovementListener();
 
   /**
* Sets hdfs cluster.
@@ -1282,8 +1280,8 @@ public class TestStoragePolicySatisfier {
 
 //Queue limit can control the traverse logic to wait for some free
 //entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier<Long> sps = new StoragePolicySatisfier<Long>(config);
-Context<Long> ctxt = new IntraSPSNameNodeContext(
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(
 hdfsCluster.getNamesystem(),
 hdfsCluster.getNamesystem().getBlockManager(), sps) {
   @Override
@@ -1297,8 +1295,7 @@ public class TestStoragePolicySatisfier {
   }
 };
 
-FileCollector fileIDCollector = createFileIdCollector(sps, ctxt);
-sps.init(ctxt, fileIDCollector, null, null);
+sps.init(ctxt);
 sps.getStorageMovementQueue().activate();
 
 INode rootINode = fsDir.getINode("/root");
@@ -1314,13 +1311,6 @@ public class TestStoragePolicySatisfier {
 dfs.delete(new Path("/root"), true);
   }
 
-  public FileCollector createFileIdCollector(
-  StoragePolicySatisfier sps, Context ctxt) {
-FileCollector fileIDCollector = new IntraSPSNameNodeFileIdCollector(
-hdfsCluster.getNamesystem().getFSDirectory(), sps);
-return fileIDCollector;
-  }
-
   /**
*  Test traverse when root parent got deleted.
*  1. Delete L when traversing Q
@@ -1351,8 +1341,8 @@ public class TestStoragePolicySatisfier {
 
 // Queue limit can control the traverse logic to wait for some free
 // entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier<Long> sps = new StoragePolicySatisfier<Long>(config);
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
-Context<Long> ctxt = new IntraSPSNameNodeContext(
+Context ctxt = new IntraSPSNameNodeContext(
 hdfsCluster.getNamesystem(),
 hdfsCluster.getNamesystem().getBlockManager(), sps) {
   @Override
@@ -1365,8 +1355,7 @@ public class TestStoragePolicySatisfier {
 return true;
   }
 };
-FileCollector fileIDCollector = createFileIdCollector(sps, ctxt);
-sps.init(ctxt, fileIDCollector, null, null);
+sps.init(ctxt);
 sps.getStorageMovementQueue().activate();
 
 INode rootINode = fsDir.getINode("/root");
@@ -1383,12 +1372,12 @@ public class TestStoragePolicySatisfier {
   }
 
   private void assertTraversal(List expectedTraverseOrder,
-  FSDirectory fsDir, StoragePolicySatisfier<Long> sps)
+  FSDirectory fsDir, StoragePolicySatisfier sps)
   throws InterruptedException {
 // Remove 10 element and make queue free, So other traversing will start.
 for (int i = 0; i < 10; i++) {
   String path = expectedTraverseOrder.remove(0);
-  ItemInfo<Long> itemInfo = sps.getStorageMovementQueue().get();
+  ItemInfo itemInfo = sps.getStorageMovementQueue().get();
   if (itemInfo == null) {
 continue;
   }
@@ -1403,7 +1392,7 @@ public class TestStoragePolicySatisfier {
 // Check other element traversed in order and E, M, U, R, S should not be
 // added in queue which we already removed from expected list
 for (String path : expectedTraverseOrder) {
-  ItemInfo<Long> itemInfo = sps.getStorageMovementQueue().get();
+  ItemInfo itemInfo = sps.getStorageMovementQueue().get();
   if (itemInfo == null) {
 continue;
   }
@@ -1717,17 +1706,17 @@ public class TestStoragePolicySatisfier {
   public void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
   int timeout) throws TimeoutException, InterruptedException {
 BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();
-final StoragePolicySatisfier<Long> sps =
-(StoragePolicySatisfier<Long>) blockManager.getSPSManager()
+final StoragePolicySatisfier sps =
+
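
The test wiring above builds the satisfier against a Context whose safe-mode and running checks are pinned; condensed, the pattern is (class and method names from the diff, the surrounding variables are assumptions):

  StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
  Context ctxt = new IntraSPSNameNodeContext(namesystem, blockManager, sps) {
    @Override public boolean isInSafeMode() { return false; } // pin active for the test
    @Override public boolean isRunning() { return true; }
  };
  sps.init(ctxt);
  sps.getStorageMovementQueue().activate();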

[41/50] [abbrv] hadoop git commit: HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by external satisfier. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by external satisfier. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8467ec24
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8467ec24
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8467ec24

Branch: refs/heads/HDFS-10285
Commit: 8467ec24fb74f30371d5a13e893fc56309ee9372
Parents: 4402f3f
Author: Rakesh Radhakrishnan 
Authored: Fri Feb 16 17:01:38 2018 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:05 2018 -0700

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  46 +
 .../NamenodeProtocolTranslatorPB.java   |  42 +
 .../hdfs/server/namenode/FSTreeTraverser.java   |   2 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  32 +---
 .../server/namenode/ReencryptionHandler.java|   2 +-
 .../sps/BlockStorageMovementAttemptedItems.java |  42 +++--
 .../sps/BlockStorageMovementNeeded.java | 119 +++--
 .../hdfs/server/namenode/sps/Context.java   |  55 +++---
 .../hdfs/server/namenode/sps/FileCollector.java |  48 +
 .../server/namenode/sps/FileIdCollector.java|  43 -
 .../namenode/sps/IntraSPSNameNodeContext.java   |  39 ++---
 .../sps/IntraSPSNameNodeFileIdCollector.java|  23 +--
 .../hdfs/server/namenode/sps/ItemInfo.java  |  39 +++--
 .../hdfs/server/namenode/sps/SPSService.java|  32 ++--
 .../namenode/sps/StoragePolicySatisfier.java| 129 +-
 .../sps/StoragePolicySatisfyManager.java|   6 +-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  24 +--
 .../sps/ExternalSPSBlockMoveTaskHandler.java|   4 +-
 .../hdfs/server/sps/ExternalSPSContext.java |  60 +++
 .../server/sps/ExternalSPSFileIDCollector.java  | 174 ---
 .../sps/ExternalSPSFilePathCollector.java   | 172 ++
 .../sps/ExternalStoragePolicySatisfier.java |   7 +-
 .../src/main/proto/NamenodeProtocol.proto   |  27 +--
 .../TestBlockStorageMovementAttemptedItems.java |  27 ++-
 .../sps/TestStoragePolicySatisfier.java |  52 +++---
 ...stStoragePolicySatisfierWithStripedFile.java |  15 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 148 +++-
 27 files changed, 701 insertions(+), 708 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8467ec24/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index 25eafdf..ed176cc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -35,16 +35,12 @@ import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksReq
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksRequestProto;
-import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksResponseProto;
 import 

[24/50] [abbrv] hadoop git commit: HDFS-12310: [SPS]: Provide an option to track the status of in progress requests. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-12310: [SPS]: Provide an option to track the status of in progress requests. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68017e33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68017e33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68017e33

Branch: refs/heads/HDFS-10285
Commit: 68017e3349e3b71a9c49f2ccea2558231ff8485d
Parents: 5780f06
Author: Rakesh Radhakrishnan 
Authored: Fri Nov 3 08:18:14 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:02 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  22 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  21 
 .../hadoop/hdfs/protocol/HdfsConstants.java |  27 +
 .../ClientNamenodeProtocolTranslatorPB.java |  20 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  33 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  17 ++-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  23 +++-
 .../server/blockmanagement/BlockManager.java|  12 ++
 .../namenode/BlockStorageMovementNeeded.java| 109 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 ++-
 .../server/namenode/StoragePolicySatisfier.java |   8 ++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  35 +-
 .../src/site/markdown/ArchivalStorage.md|   3 +-
 .../TestPersistentStoragePolicySatisfier.java   |   2 +-
 .../namenode/TestStoragePolicySatisfier.java|  67 
 .../hdfs/tools/TestStoragePolicyCommands.java   |  18 +++
 16 files changed, 424 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68017e33/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 7337aa2..471ab2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -123,6 +123,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -3169,4 +3170,25 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 checkOpen();
 return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
   }
+
+  /**
+   * Check the storage policy satisfy status of the path for which
+   * {@link DFSClient#satisfyStoragePolicy(String)} is called.
+   *
+   * @return Storage policy satisfy status.
+   * <ul>
+   * <li>PENDING if path is in queue and not processed for satisfying
+   * the policy.</li>
+   * <li>IN_PROGRESS if satisfying the storage policy for path.</li>
+   * <li>SUCCESS if storage policy satisfied for the path.</li>
+   * <li>NOT_AVAILABLE if
+   * {@link DFSClient#satisfyStoragePolicy(String)} not called for
+   * path or SPS work is already finished.</li>
+   * </ul>
+   * @throws IOException
+   */
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+  String path) throws IOException {
+return namenode.checkStoragePolicySatisfyPathStatus(path);
+  }
 }
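
A minimal polling sketch for the new status API; the path and interval are illustrative, 'client' is an open DFSClient, and InterruptedException/IOException handling is elided:

  StoragePolicySatisfyPathStatus status;
  do {
    status = client.checkStoragePolicySatisfyPathStatus("/data/file1"); // hypothetical path
    Thread.sleep(1000);
  } while (status == StoragePolicySatisfyPathStatus.PENDING
      || status == StoragePolicySatisfyPathStatus.IN_PROGRESS);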

http://git-wip-us.apache.org/repos/asf/hadoop/blob/68017e33/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 81d7c91..360fd63 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import 

[38/50] [abbrv] hadoop git commit: HDFS-13165: [SPS]: Collects successfully moved block details via IBR. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/2acc50b8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
index 7580ba9..f5225d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
@@ -20,13 +20,10 @@ package org.apache.hadoop.hdfs.server.sps;
 
 import java.io.IOException;
 import java.net.Socket;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -39,7 +36,6 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
@@ -48,15 +44,14 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.balancer.KeyManager;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
+import org.apache.hadoop.hdfs.server.common.sps.BlockDispatcher;
 import org.apache.hadoop.hdfs.server.common.sps.BlockMovementAttemptFinished;
 import org.apache.hadoop.hdfs.server.common.sps.BlockMovementStatus;
 import org.apache.hadoop.hdfs.server.common.sps.BlockStorageMovementTracker;
 import org.apache.hadoop.hdfs.server.common.sps.BlocksMovementsStatusHandler;
-import org.apache.hadoop.hdfs.server.common.sps.BlockDispatcher;
 import org.apache.hadoop.hdfs.server.namenode.sps.BlockMoveTaskHandler;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
@@ -105,12 +100,14 @@ public class ExternalSPSBlockMoveTaskHandler implements BlockMoveTaskHandler {
 int ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);
 blkDispatcher = new BlockDispatcher(HdfsConstants.READ_TIMEOUT,
 ioFileBufferSize, connectToDnViaHostname);
+
+startMovementTracker();
   }
 
   /**
* Initializes block movement tracker daemon and starts the thread.
*/
-  public void init() {
+  private void startMovementTracker() {
 movementTrackerThread = new Daemon(this.blkMovementTracker);
 movementTrackerThread.setName("BlockStorageMovementTracker");
 movementTrackerThread.start();
@@ -156,24 +153,16 @@ public class ExternalSPSBlockMoveTaskHandler implements BlockMoveTaskHandler {
 // dn.incrementBlocksScheduled(blkMovingInfo.getTargetStorageType());
 LOG.debug("Received BlockMovingTask {}", blkMovingInfo);
 BlockMovingTask blockMovingTask = new BlockMovingTask(blkMovingInfo);
-Future moveCallable = mCompletionServ
-.submit(blockMovingTask);
-blkMovementTracker.addBlock(blkMovingInfo.getBlock(), moveCallable);
+mCompletionServ.submit(blockMovingTask);
   }
 
   private class ExternalBlocksMovementsStatusHandler
-  extends BlocksMovementsStatusHandler {
+  implements BlocksMovementsStatusHandler {
 @Override
-public void handle(
-List moveAttemptFinishedBlks) {
-  List blocks = new ArrayList<>();
-  for (BlockMovementAttemptFinished item : moveAttemptFinishedBlks) {
-blocks.add(item.getBlock());
-  }
-  BlocksStorageMoveAttemptFinished blkAttempted =
-  new BlocksStorageMoveAttemptFinished(
-  blocks.toArray(new Block[blocks.size()]));
-  service.notifyStorageMovementAttemptFinishedBlks(blkAttempted);
+public void handle(BlockMovementAttemptFinished attemptedMove) {
+  service.notifyStorageMovementAttemptFinishedBlk(
+  attemptedMove.getTargetDatanode(), attemptedMove.getTargetType(),
+  attemptedMove.getBlock());
 }
   }
 
@@ -194,6 +183,7 @@ public class 
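
With the handler narrowed to a per-block callback, an implementation reduces to one method; a hedged sketch (interface and accessor names from the diff, the logging body is illustrative):

  class LoggingMovementStatusHandler implements BlocksMovementsStatusHandler {
    @Override
    public void handle(BlockMovementAttemptFinished attemptedMove) {
      System.out.println("move attempt finished: block=" + attemptedMove.getBlock()
          + " target=" + attemptedMove.getTargetDatanode()
          + " type=" + attemptedMove.getTargetType());
    }
  }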

[25/50] [abbrv] hadoop git commit: HDFS-12106: [SPS]: Improve storage policy satisfier configurations. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-12106: [SPS]: Improve storage policy satisfier configurations. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c561cb31
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c561cb31
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c561cb31

Branch: refs/heads/HDFS-10285
Commit: c561cb316e365ef674784cd6cf0b12c0fbc271a3
Parents: 9b83f94
Author: Surendra Singh Lilhore 
Authored: Wed Nov 15 20:22:27 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:02 2018 -0700

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  6 +++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  4 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  3 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 10 ++--
 .../server/blockmanagement/DatanodeManager.java | 12 ++---
 .../datanode/StoragePolicySatisfyWorker.java|  3 +-
 .../BlockStorageMovementAttemptedItems.java |  8 +--
 .../namenode/BlockStorageMovementNeeded.java| 46 
 .../hdfs/server/namenode/FSNamesystem.java  |  3 ++
 .../server/namenode/StoragePolicySatisfier.java | 42 ---
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   | 27 +++---
 .../src/main/resources/hdfs-default.xml | 17 --
 .../src/site/markdown/ArchivalStorage.md|  2 +-
 .../TestBlockStorageMovementAttemptedItems.java | 10 ++--
 .../namenode/TestStoragePolicySatisfier.java| 57 ++--
 15 files changed, 199 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c561cb31/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index 190a1c6..aabcdd9 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -150,6 +150,12 @@ public final class HdfsConstants {
 SUCCESS,
 
 /**
+ * A few blocks failed to move and the path still does not
+ * fully satisfy the storage policy.
+ */
+FAILURE,
+
+/**
  * Status not available.
  */
 NOT_AVAILABLE

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c561cb31/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 9281bff..7770e31 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -3409,6 +3409,8 @@ public class PBHelperClient {
   return StoragePolicySatisfyPathStatus.IN_PROGRESS;
 case SUCCESS:
   return StoragePolicySatisfyPathStatus.SUCCESS;
+case FAILURE:
+  return StoragePolicySatisfyPathStatus.FAILURE;
 case NOT_AVAILABLE:
   return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
 default:
@@ -3425,6 +3427,8 @@ public class PBHelperClient {
   return HdfsConstants.StoragePolicySatisfyPathStatus.IN_PROGRESS;
 case SUCCESS:
   return HdfsConstants.StoragePolicySatisfyPathStatus.SUCCESS;
+case FAILURE:
+  return HdfsConstants.StoragePolicySatisfyPathStatus.FAILURE;
 case NOT_AVAILABLE:
   return HdfsConstants.StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
 default:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c561cb31/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 1de13ca..933a19a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -854,7 +854,8 @@ message CheckStoragePolicySatisfyPathStatusResponseProto {
 PENDING = 0;
 IN_PROGRESS = 1;
 SUCCESS = 2;
-NOT_AVAILABLE = 

[33/50] [abbrv] hadoop git commit: HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by Surendra Singh.

2018-08-12 Thread umamahesh
HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by 
Surendra Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4402f3f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4402f3f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4402f3f8

Branch: refs/heads/HDFS-10285
Commit: 4402f3f8557527d5c6cdad6f5bdcbd707b8cbf52
Parents: d3de4fb
Author: Uma Maheswara Rao G 
Authored: Wed Feb 7 02:28:23 2018 -0800
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:04 2018 -0700

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   4 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|   6 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  14 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |   8 +-
 .../federation/router/RouterRpcServer.java  |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  61 ---
 ...tNamenodeProtocolServerSideTranslatorPB.java |  16 +-
 .../server/blockmanagement/BlockManager.java| 255 +---
 .../blockmanagement/DatanodeDescriptor.java |  33 +-
 .../hdfs/server/common/HdfsServerConstants.java |   2 +-
 .../datanode/StoragePolicySatisfyWorker.java|  15 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  26 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   1 -
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  46 +--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  30 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  21 +-
 .../sps/BlockStorageMovementNeeded.java |   4 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |   6 +-
 .../hdfs/server/namenode/sps/SPSPathIds.java|  70 
 .../hdfs/server/namenode/sps/SPSService.java|  10 +-
 .../namenode/sps/StoragePolicySatisfier.java| 137 ---
 .../sps/StoragePolicySatisfyManager.java| 399 +++
 .../sps/ExternalStoragePolicySatisfier.java |   2 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   2 +-
 .../namenode/TestNameNodeReconfigure.java   |  19 +-
 .../TestPersistentStoragePolicySatisfier.java   |   3 +-
 .../TestStoragePolicySatisfierWithHA.java   |   6 +-
 .../sps/TestStoragePolicySatisfier.java |  35 +-
 ...stStoragePolicySatisfierWithStripedFile.java |   6 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  24 +-
 33 files changed, 665 insertions(+), 604 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4402f3f8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 471ab2c..b6f9bdd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3110,8 +3110,8 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 }
   }
 
-  public boolean isStoragePolicySatisfierRunning() throws IOException {
-return namenode.isStoragePolicySatisfierRunning();
+  public boolean isInternalSatisfierRunning() throws IOException {
+return namenode.isInternalSatisfierRunning();
   }
 
   Tracer getTracer() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4402f3f8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 360fd63..5c51c22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1759,12 +1759,12 @@ public interface ClientProtocol {
   void satisfyStoragePolicy(String path) throws IOException;
 
   /**
-   * Check if StoragePolicySatisfier is running.
-   * @return true if StoragePolicySatisfier is running
+   * Check if internal StoragePolicySatisfier is running.
+   * @return true if internal StoragePolicySatisfier is running
* @throws IOException
*/
   @Idempotent
-  boolean isStoragePolicySatisfierRunning() throws IOException;
+  boolean 

[04/50] [abbrv] hadoop git commit: HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G

2018-08-12 Thread umamahesh
HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/68af4e19
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/68af4e19
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/68af4e19

Branch: refs/heads/HDFS-10285
Commit: 68af4e199a754ca6c727b844a22ecabe9dc7cc68
Parents: 5eb24ef
Author: Rakesh Radhakrishnan 
Authored: Fri Jul 14 22:36:09 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../src/site/markdown/ArchivalStorage.md| 51 ++--
 1 file changed, 48 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/68af4e19/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index a56cf8b..9098616 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -97,8 +97,44 @@ The effective storage policy can be retrieved by the "[`storagepolicies -getStor
 
 The default storage type of a datanode storage location will be DISK if it 
does not have a storage type tagged explicitly.
 
-Mover - A New Data Migration Tool
--
+Storage Policy Based Data Movement
+--
+
+Setting a new storage policy on an already existing file/dir will change the 
policy in the Namespace, but it will not move the blocks physically across 
storage media.
+The following two options allow users to move the blocks according to the 
newly set policy. So, once a user changes/sets a new policy on a file/directory, 
the user should also perform one of these options to achieve the desired data 
movement. Note that the two options cannot run simultaneously.
+
+### Storage Policy Satisfier (SPS)
+
When a user changes the storage policy on a file/directory, the user can call 
the `HdfsAdmin` API `satisfyStoragePolicy()` to move the blocks as per the new 
policy set.
The SPS daemon thread runs along with the namenode and periodically scans for 
storage mismatches between the newly set policy and the physical block 
placement. It only tracks the files/directories for which the user invoked 
satisfyStoragePolicy. If SPS identifies some blocks to be moved for a file, 
then it schedules block movement tasks to the datanodes. A Coordinator 
DataNode (C-DN) tracks all block movements associated with a file and notifies 
the namenode about movement success/failure. If there are any failures in 
movement, the SPS re-attempts by sending a new block movement task.
+
+SPS can be activated and deactivated dynamically without restarting the 
Namenode.
+
+Detailed design documentation can be found at [Storage Policy Satisfier(SPS) 
(HDFS-10285)](https://issues.apache.org/jira/browse/HDFS-10285)
+
+* **Note**: When a user invokes the `satisfyStoragePolicy()` API on a 
directory, SPS considers only the files immediately under that directory. 
Sub-directories won't be considered for satisfying the policy. It is the 
user's responsibility to call this API on directories recursively in order to 
track all files under the sub-tree.
+
+* HdfsAdmin API :
+`public void satisfyStoragePolicy(final Path path) throws IOException`
+
+* Arguments :
+
+| | |
+|:---- |:---- |
+| `path` | A path which requires blocks storage movement. |
+
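To make the call sequence concrete, a minimal editorial sketch follows (not 
part of this patch; the NameNode URI, path, and policy name are placeholders):

```java
import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class SatisfyPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://nn-host:8020"), conf);
    Path dir = new Path("/archive/logs");
    // Changing the policy only updates the Namespace ...
    admin.setStoragePolicy(dir, "COLD");
    // ... so ask SPS to physically move the blocks of the immediate files.
    // Sub-directories must be submitted separately; the API is not recursive.
    admin.satisfyStoragePolicy(dir);
  }
}
```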
+Configurations:
+
+*   **dfs.storage.policy.satisfier.activate** - Used to activate or deactivate 
SPS. Setting it to true activates SPS,
+   and false deactivates it.
+
+*   **dfs.storage.policy.satisfier.recheck.timeout.millis** - A timeout to 
re-check the processed block storage movement
+   command results from the Co-ordinator Datanode.
+
+*   **dfs.storage.policy.satisfier.self.retry.timeout.millis** - A timeout to 
retry if no block movement results are reported by the
+   Co-ordinator Datanode within this configured time.
+
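As an editorial illustration, the activation flag would typically be set in 
hdfs-site.xml; the fragment below is an assumption built from the property 
described above, not part of the patch:

```xml
<!-- Hypothetical hdfs-site.xml fragment enabling SPS. -->
<property>
  <name>dfs.storage.policy.satisfier.activate</name>
  <value>true</value>
</property>
```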
+### Mover - A New Data Migration Tool
 
 A new data migration tool is added for archiving data. The tool is similar to 
Balancer. It periodically scans the files in HDFS to check if the block 
placement satisfies the storage policy. For the blocks violating the storage 
policy, it moves the replicas to a different storage type in order to fulfill 
the storage policy requirement. Note that it always tries to move block 
replicas within the same node whenever possible. If that is not possible (e.g. 
when a node doesn’t have the target storage type) then it will copy the block 
replicas to another node over the network.
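
As a usage aside (editorial; the path is a placeholder), the Mover is invoked 
from the command line against the files/dirs whose placement should be 
re-checked:

```
$ hdfs mover -p /archive/logs
```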
 
@@ -115,6 +151,10 @@ A new data 

[09/50] [abbrv] hadoop git commit: HDFS-12556: [SPS] : Block movement analysis should be done in read lock.

2018-08-12 Thread umamahesh
HDFS-12556: [SPS] : Block movement analysis should be done in read lock.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5780f062
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5780f062
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5780f062

Branch: refs/heads/HDFS-10285
Commit: 5780f0624de2531194bc98eb25a928f7a483b992
Parents: 00eceed
Author: Surendra Singh Lilhore 
Authored: Sat Oct 14 15:11:26 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:01 2018 -0700

--
 .../server/namenode/StoragePolicySatisfier.java | 27 +---
 .../TestPersistentStoragePolicySatisfier.java   |  2 +-
 2 files changed, 19 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5780f062/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a28a806..cbfba44 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -242,12 +242,25 @@ public class StoragePolicySatisfier implements Runnable {
   ItemInfo itemInfo = storageMovementNeeded.get();
   if (itemInfo != null) {
 long trackId = itemInfo.getTrackId();
-BlockCollection blockCollection =
-namesystem.getBlockCollection(trackId);
-// Check blockCollectionId existence.
+BlockCollection blockCollection;
+BlocksMovingAnalysis status = null;
+try {
+  namesystem.readLock();
+  blockCollection = namesystem.getBlockCollection(trackId);
+  // Check blockCollectionId existence.
+  if (blockCollection == null) {
+// File doesn't exists (maybe got deleted), remove trackId from
+// the queue
+storageMovementNeeded.removeItemTrackInfo(itemInfo);
+  } else {
+status =
+analyseBlocksStorageMovementsAndAssignToDN(
+blockCollection);
+  }
+} finally {
+  namesystem.readUnlock();
+}
 if (blockCollection != null) {
-  BlocksMovingAnalysis status =
-  analyseBlocksStorageMovementsAndAssignToDN(blockCollection);
   switch (status.status) {
   // Just add to monitor, so it will be retried after timeout
   case ANALYSIS_SKIPPED_FOR_RETRY:
@@ -283,10 +296,6 @@ public class StoragePolicySatisfier implements Runnable {
 storageMovementNeeded.removeItemTrackInfo(itemInfo);
 break;
   }
-} else {
-  // File doesn't exists (maybe got deleted), remove trackId from
-  // the queue
-  storageMovementNeeded.removeItemTrackInfo(itemInfo);
 }
   }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5780f062/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 5bce296..7165d06 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -72,7 +72,7 @@ public class TestPersistentStoragePolicySatisfier {
   {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD}
   };
 
-  private final int timeout = 30;
+  private final int timeout = 9;
 
   /**
* Setup environment for every test case.





[10/50] [abbrv] hadoop git commit: HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7ea24fc0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7ea24fc0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7ea24fc0

Branch: refs/heads/HDFS-10285
Commit: 7ea24fc06c081e2ba6f5f66d212abb14b80c9064
Parents: 0e820f1
Author: Uma Maheswara Rao G 
Authored: Wed Aug 23 15:37:03 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:01 2018 -0700

--
 .../server/blockmanagement/BlockManager.java|  21 +-
 .../server/blockmanagement/DatanodeManager.java |  14 +-
 .../hdfs/server/datanode/BPOfferService.java|   1 +
 .../BlockStorageMovementAttemptedItems.java |  95 +---
 .../namenode/BlockStorageMovementNeeded.java| 233 ++-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  91 +++-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  11 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../server/namenode/StoragePolicySatisfier.java | 108 ++---
 .../TestStoragePolicySatisfyWorker.java |   5 +-
 .../TestBlockStorageMovementAttemptedItems.java |  34 +--
 .../TestPersistentStoragePolicySatisfier.java   | 104 +
 .../namenode/TestStoragePolicySatisfier.java| 127 +-
 14 files changed, 589 insertions(+), 259 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ea24fc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index bcc07cc..b53d946 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,7 +89,6 @@ import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementNeeded;
 import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -431,9 +430,6 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private final BlockStorageMovementNeeded storageMovementNeeded =
-  new BlockStorageMovementNeeded();
-
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -480,8 +476,7 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
-conf);
+sps = new StoragePolicySatisfier(namesystem, this, conf);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5017,20 +5012,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Set file block collection for which storage movement needed for its 
blocks.
-   *
-   * @param id
-   *  - file block collection id.
-   */
-  public void satisfyStoragePolicy(long id) {
-storageMovementNeeded.add(id);
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Added block collection id {} to block "
-  + "storageMovementNeeded queue", id);
-}
-  }
-
-  /**
* Gets the storage policy satisfier instance.
*
* @return sps

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7ea24fc0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 

[08/50] [abbrv] hadoop git commit: HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks before removing the xattr. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks 
before removing the xattr. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b360b16
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b360b16
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b360b16

Branch: refs/heads/HDFS-10285
Commit: 0b360b16ab8759e3db606ada3420f4e2f56235f3
Parents: 00cf207
Author: Uma Maheswara Rao G 
Authored: Mon Jul 10 18:00:58 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../server/blockmanagement/BlockManager.java|  15 +++
 .../server/namenode/StoragePolicySatisfier.java |  20 +++-
 .../namenode/TestStoragePolicySatisfier.java| 102 ++-
 ...stStoragePolicySatisfierWithStripedFile.java |  90 
 4 files changed, 224 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b360b16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 3385af6..988067c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4324,6 +4324,21 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
+   * Check whether the file has low redundancy blocks.
+   */
+  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
+boolean result = false;
+for (BlockInfo block : bc.getBlocks()) {
+  short expected = getExpectedRedundancyNum(block);
+  final NumberReplicas n = countNodes(block);
+  if (expected > n.liveReplicas()) {
+result = true;
+  }
+}
+return result;
+  }
+
+  /**
* Check sufficient redundancy of the blocks in the collection. If any block
* is needed reconstruction, insert it into the reconstruction queue.
* Otherwise, if the block is more than the expected replication factor,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b360b16/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 1b2afa3..97cbf1b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -99,7 +99,10 @@ public class StoragePolicySatisfier implements Runnable {
 // Represents that, the analysis skipped due to some conditions.
 // Example conditions are if no blocks really exists in block collection or
 // if analysis is not required on ec files with unsuitable storage policies
-BLOCKS_TARGET_PAIRING_SKIPPED;
+BLOCKS_TARGET_PAIRING_SKIPPED,
+// Represents that all the reported blocks satisfy the policy but
+// some of the blocks have low redundancy.
+FEW_LOW_REDUNDANCY_BLOCKS
   }
 
   public StoragePolicySatisfier(final Namesystem namesystem,
@@ -247,6 +250,14 @@ public class StoragePolicySatisfier implements Runnable {
   case FEW_BLOCKS_TARGETS_PAIRED:
 this.storageMovementsMonitor.add(blockCollectionID, false);
 break;
+  case FEW_LOW_REDUNDANCY_BLOCKS:
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Adding trackID " + blockCollectionID
+  + " back to retry queue as some of the blocks"
+  + " are low redundant.");
+}
+this.storageMovementNeeded.add(blockCollectionID);
+break;
   // Just clean Xattrs
   case BLOCKS_TARGET_PAIRING_SKIPPED:
   case BLOCKS_ALREADY_SATISFIED:
@@ -347,11 +358,16 @@ public class StoragePolicySatisfier implements Runnable {
 boolean computeStatus = computeBlockMovingInfos(blockMovingInfos,
 blockInfo, expectedStorageTypes, existing, storages);
 if (computeStatus
-&& status != 

[13/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00eceed2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 57e9f94..70219f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -203,11 +203,11 @@ public class TestStoragePolicySatisfier {
   }
 
   /**
-   * Tests to verify that the block storage movement results will be propagated
+   * Tests to verify that the block storage movement report will be propagated
* to Namenode via datanode heartbeat.
*/
   @Test(timeout = 30)
-  public void testPerTrackIdBlocksStorageMovementResults() throws Exception {
+  public void testBlksStorageMovementAttemptFinishedReport() throws Exception {
 try {
   createCluster();
   // Change policy to ONE_SSD
@@ -229,7 +229,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 2, 3, dfs);
 
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
 } finally {
   shutdownCluster();
 }
@@ -276,7 +276,7 @@ public class TestStoragePolicySatisfier {
 fileName, StorageType.DISK, 2, 3, dfs);
   }
 
-  waitForBlocksMovementResult(files.size(), 3);
+  waitForBlocksMovementAttemptReport(files.size(), 3);
 } finally {
   shutdownCluster();
 }
@@ -457,7 +457,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 2, 3, dfs);
 
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
 } finally {
   shutdownCluster();
 }
@@ -630,7 +630,7 @@ public class TestStoragePolicySatisfier {
   // No block movement will be scheduled as there is no target node
   // available with the required storage type.
   waitForAttemptedItems(1, 3);
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
   DFSTestUtil.waitExpectedStorageType(
   file1, StorageType.ARCHIVE, 1, 3, dfs);
   DFSTestUtil.waitExpectedStorageType(
@@ -691,7 +691,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 3, 3, dfs);
 
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
 } finally {
   shutdownCluster();
 }
@@ -871,7 +871,7 @@ public class TestStoragePolicySatisfier {
   Set<DatanodeDescriptor> dns = hdfsCluster.getNamesystem()
   .getBlockManager().getDatanodeManager().getDatanodes();
   for (DatanodeDescriptor dd : dns) {
-assertNull(dd.getBlocksToMoveStorages());
+assertNull(dd.getBlocksToMoveStorages(1));
   }
 
   // Enable heart beats now
@@ -1224,7 +1224,7 @@ public class TestStoragePolicySatisfier {
   /**
* Test SPS for batch processing.
*/
-  @Test(timeout = 30)
+  @Test(timeout = 300)
   public void testBatchProcessingForSPSDirectory() throws Exception {
 try {
   StorageType[][] diskTypes = new StorageType[][] {
@@ -1252,7 +1252,7 @@ public class TestStoragePolicySatisfier {
 DFSTestUtil.waitExpectedStorageType(fileName, StorageType.ARCHIVE, 2,
 3, dfs);
   }
-  waitForBlocksMovementResult(files.size(), 3);
+  waitForBlocksMovementAttemptReport(files.size(), 3);
   String expectedLogMessage = "StorageMovementNeeded queue remaining"
   + " capacity is zero";
   assertTrue("Log output does not contain expected log message: "
@@ -1268,7 +1268,7 @@ public class TestStoragePolicySatisfier {
*  1. Delete /root when traversing Q
*  2. U, R, S should not be in queued.
*/
-  @Test
+  @Test(timeout = 30)
   public void testTraverseWhenParentDeleted() throws Exception {
 StorageType[][] diskTypes = new StorageType[][] {
 {StorageType.DISK, StorageType.ARCHIVE},
@@ -1330,7 +1330,7 @@ public class TestStoragePolicySatisfier {
*  1. Delete L when traversing Q
*  2. E, M, U, R, S should not be in queued.
*/
-  @Test
+  @Test(timeout = 30)
   public void testTraverseWhenRootParentDeleted() throws Exception {
 StorageType[][] diskTypes = new StorageType[][] {
 {StorageType.DISK, StorageType.ARCHIVE},
@@ -1387,6 +1387,82 @@ public class TestStoragePolicySatisfier {
 dfs.delete(new Path("/root"), 

[03/50] [abbrv] hadoop git commit: HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.

2018-08-12 Thread umamahesh
HDFS-12146. [SPS]: Fix 
TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks.
 Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e82e5a8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e82e5a8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e82e5a8

Branch: refs/heads/HDFS-10285
Commit: 9e82e5a86ea66b9d24d38b922ee5fa97b3391475
Parents: 68af4e1
Author: Rakesh Radhakrishnan 
Authored: Mon Jul 17 22:40:03 2017 +0530
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:06:00 2018 -0700

--
 .../server/namenode/TestStoragePolicySatisfier.java |  9 +
 .../TestStoragePolicySatisfierWithStripedFile.java  | 16 
 2 files changed, 13 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e82e5a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index be7236b..10ceae7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -1025,12 +1025,13 @@ public class TestStoragePolicySatisfier {
   list.add(cluster.stopDataNode(0));
   list.add(cluster.stopDataNode(0));
   cluster.restartNameNodes();
-  cluster.restartDataNode(list.get(0), true);
-  cluster.restartDataNode(list.get(1), true);
+  cluster.restartDataNode(list.get(0), false);
+  cluster.restartDataNode(list.get(1), false);
   cluster.waitActive();
   fs.satisfyStoragePolicy(filePath);
-  Thread.sleep(3000 * 6);
-  cluster.restartDataNode(list.get(2), true);
+  DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+  StorageType.ARCHIVE, 2, 3, cluster.getFileSystem());
+  cluster.restartDataNode(list.get(2), false);
   DFSTestUtil.waitExpectedStorageType(filePath.toString(),
   StorageType.ARCHIVE, 3, 3, cluster.getFileSystem());
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e82e5a8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index f905ead..c070113 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -308,8 +308,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
*/
   @Test(timeout = 30)
   public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
-// start 10 datanodes
-int numOfDatanodes = 10;
+// start 9 datanodes
+int numOfDatanodes = 9;
 int storagesPerDatanode = 2;
 long capacity = 20 * defaultStripeBlockSize;
 long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@@ -338,7 +338,6 @@ public class TestStoragePolicySatisfierWithStripedFile {
 {StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE},
-{StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE}})
 .storageCapacities(capacities)
 .build();
@@ -366,15 +365,16 @@ public class TestStoragePolicySatisfierWithStripedFile {
   }
   cluster.restartNameNodes();
   // Restart half datanodes
-  for (int i = 0; i < numOfDatanodes / 2; i++) {
-cluster.restartDataNode(list.get(i), true);
+  for (int i = 0; i < 5; i++) {
+cluster.restartDataNode(list.get(i), false);
   }
   cluster.waitActive();
   fs.satisfyStoragePolicy(fooFile);
-  Thread.sleep(3000 * 6);
+  DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
+  StorageType.ARCHIVE, 5, 3, cluster.getFileSystem());
  //Start remaining datanodes

[01/50] [abbrv] hadoop git commit: HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the edits log. Contributed by Surendra Singh Lilhore. [Forced Update!]

2018-08-12 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 85405a669 -> 3ac07b720 (forced update)


HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the 
edits log. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ce332dc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ce332dc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ce332dc

Branch: refs/heads/HDFS-10285
Commit: 5ce332dc9a072f8850ab71ba16898faf8e866c06
Parents: 6fe6c54
Author: Uma Maheswara Rao G 
Authored: Mon May 22 21:39:43 2017 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sun Aug 12 03:05:59 2018 -0700

--
 .../hdfs/server/namenode/FSDirAttrOp.java   |  91 
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 145 +++
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirectory.java   |  16 --
 .../hdfs/server/namenode/FSNamesystem.java  |  24 ++-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  10 ++
 .../server/namenode/StoragePolicySatisfier.java |   4 +-
 .../TestPersistentStoragePolicySatisfier.java   |  90 +++-
 .../namenode/TestStoragePolicySatisfier.java|   5 +-
 9 files changed, 268 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ce332dc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 0df58bf..1dbee96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -43,14 +42,12 @@ import com.google.common.collect.Lists;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 
 public class FSDirAttrOp {
   static FileStatus setPermission(
@@ -193,29 +190,6 @@ public class FSDirAttrOp {
 return fsd.getAuditFileInfo(iip);
   }
 
-  static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
-  String src, boolean logRetryCache) throws IOException {
-
-FSPermissionChecker pc = fsd.getPermissionChecker();
-List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
-INodesInPath iip;
-fsd.writeLock();
-try {
-
-  // check operation permission.
-  iip = fsd.resolvePath(pc, src, DirOp.WRITE);
-  if (fsd.isPermissionEnabled()) {
-fsd.checkPathAccess(pc, iip, FsAction.WRITE);
-  }
-  XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-} finally {
-  fsd.writeUnlock();
-}
-fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-return fsd.getAuditFileInfo(iip);
-  }
-
   static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
   throws IOException {
 return bm.getStoragePolicies();
@@ -477,71 +451,6 @@ public class FSDirAttrOp {
 }
   }
 
-  static XAttr unprotectedSatisfyStoragePolicy(INodesInPath iip,
-  BlockManager bm, FSDirectory fsd) throws IOException {
-
-final INode inode = FSDirectory.resolveLastINode(iip);
-final int snapshotId = iip.getLatestSnapshotId();
-final List<INode> candidateNodes = new ArrayList<>();
-
-// TODO: think about optimization here, label the dir instead
-// of the sub-files of the dir.
-if (inode.isFile()) {
-  candidateNodes.add(inode);
-} else if (inode.isDirectory()) {
-  for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
-  candidateNodes.add(node);
-}
-  }
-}
-
-// If node has satisfy xattr, then stop 

[21/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/78420719/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
new file mode 100644
index 000..5635621
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -0,0 +1,572 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.ItemInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser.TraverseInfo;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * A class to track the block collection IDs (inode IDs) for which physical
+ * storage movement is needed, as per the Namespace and StorageReports from DN.
+ * It scans the pending directories for which storage movement is required and
+ * schedules the block collection IDs for movement. It tracks the info of
+ * scheduled items and removes the SPS xAttr from the file/directory once
+ * the movement succeeds.
+ */
+@InterfaceAudience.Private
+public class BlockStorageMovementNeeded {
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(BlockStorageMovementNeeded.class);
+
+  private final Queue<ItemInfo> storageMovementNeeded =
+  new LinkedList<ItemInfo>();
+
+  /**
+   * Map of startId and number of children. The number of children indicates
+   * the number of files pending to satisfy the policy.
+   */
+  private final Map<Long, DirPendingWorkInfo> pendingWorkForDirectory =
+  new HashMap<Long, DirPendingWorkInfo>();
+
+  private final Map<Long, StoragePolicySatisfyPathStatusInfo> spsStatus =
+  new ConcurrentHashMap<>();
+
+  private final Namesystem namesystem;
+
+  // List of pending dirs to satisfy the policy
+  private final Queue<Long> spsDirsToBeTraveresed = new LinkedList<Long>();
+
+  private final StoragePolicySatisfier sps;
+
+  private Daemon inodeIdCollector;
+
+  private final int maxQueuedItem;
+
+  // Amount of time to cache the SUCCESS status of path before turning it to
+  // NOT_AVAILABLE.
+  private static long statusClearanceElapsedTimeMs = 30;
+
+  public BlockStorageMovementNeeded(Namesystem namesystem,
+  StoragePolicySatisfier sps, int queueLimit) {
+this.namesystem = namesystem;
+this.sps = sps;
+this.maxQueuedItem = queueLimit;
+  }
+
+  /**
+   * Add the candidate to the tracking list for which storage movement
+   * is expected, if necessary.
+   *
+   * @param trackInfo
+   *  - track info for satisfy the policy
+   */
+  public synchronized void add(ItemInfo trackInfo) {
+spsStatus.put(trackInfo.getStartId(),
+new StoragePolicySatisfyPathStatusInfo(
+StoragePolicySatisfyPathStatus.IN_PROGRESS));
+storageMovementNeeded.add(trackInfo);
+  }
+
+  /**
+   * Add the itemInfo to the tracking list for which storage movement
+   * is expected, if necessary.
+   * @param startId
+   *- start id
+  

[14/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
http://git-wip-us.apache.org/repos/asf/hadoop/blob/00eceed2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index d3c5cb1..2f621e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -156,7 +156,7 @@ import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -1517,14 +1517,15 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   boolean requestFullBlockReportLease,
   @Nonnull SlowPeerReports slowPeers,
   @Nonnull SlowDiskReports slowDisks,
-  BlocksStorageMovementResult[] blkMovementStatus) throws IOException {
+  BlocksStorageMoveAttemptFinished storageMovementFinishedBlks)
+  throws IOException {
 checkNNStartup();
 verifyRequest(nodeReg);
 return namesystem.handleHeartbeat(nodeReg, report,
 dnCacheCapacity, dnCacheUsed, xceiverCount, xmitsInProgress,
 failedVolumes, volumeFailureSummary, requestFullBlockReportLease,
 slowPeers, slowDisks,
-blkMovementStatus);
+storageMovementFinishedBlks);
   }
 
   @Override // DatanodeProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/00eceed2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a4372d5..a28a806 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.util.Time.monotonicNow;
+
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -44,7 +46,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.util.Daemon;
@@ -82,25 +84,38 @@ public class StoragePolicySatisfier implements Runnable {
   /**
* Represents the collective analysis status for all blocks.
*/
-  private enum BlocksMovingAnalysisStatus {
-// Represents that, the analysis skipped due to some conditions. A such
-// condition is if block collection is in incomplete state.
-ANALYSIS_SKIPPED_FOR_RETRY,
-// Represents that, all block storage movement needed blocks found its
-// targets.
-ALL_BLOCKS_TARGETS_PAIRED,
-// Represents that, only fewer or none of the block storage movement needed
-// block found its eligible targets.
-FEW_BLOCKS_TARGETS_PAIRED,
-// Represents that, none of the blocks found for block storage movements.
-BLOCKS_ALREADY_SATISFIED,
-// Represents that, the analysis skipped due to some conditions.
-// Example conditions are if no blocks really exists in block collection or
-// if analysis is not required on ec files with unsuitable storage policies
-BLOCKS_TARGET_PAIRING_SKIPPED,
-// Represents that, All the reported blocks are satisfied the policy but
-// some of the blocks are low redundant.
-FEW_LOW_REDUNDANCY_BLOCKS
+  private 

hadoop git commit: HDFS-13808: [SPS]: Remove unwanted FSNamesystem #isFileOpenedForWrite() and #getFileInfo() function. Contributed by Rakesh R.

2018-08-12 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 0f142918a -> 85405a669


HDFS-13808: [SPS]: Remove unwanted FSNamesystem #isFileOpenedForWrite() and 
#getFileInfo() function. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85405a66
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85405a66
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85405a66

Branch: refs/heads/HDFS-10285
Commit: 85405a669d91b6203b7cdaac574ed303cfe0c0c5
Parents: 0f14291
Author: Uma Maheswara Rao Gangumalla 
Authored: Sat Aug 11 23:22:59 2018 -0700
Committer: Uma Maheswara Rao Gangumalla 
Committed: Sat Aug 11 23:22:59 2018 -0700

--
 .../router/RouterNamenodeProtocol.java  |  1 +
 .../server/blockmanagement/BlockManager.java| 34 
 .../blockmanagement/DatanodeDescriptor.java |  2 +-
 .../server/blockmanagement/DatanodeManager.java | 17 --
 .../hdfs/server/datanode/BPServiceActor.java| 16 --
 .../hdfs/server/namenode/FSNamesystem.java  | 38 -
 .../hadoop/hdfs/server/namenode/Namesystem.java | 22 
 .../sps/BlockStorageMovementNeeded.java | 18 ++-
 .../hdfs/server/namenode/sps/Context.java   | 28 --
 .../hdfs/server/namenode/sps/SPSService.java|  5 +-
 .../namenode/sps/StoragePolicySatisfier.java| 19 +++
 .../hdfs/server/sps/ExternalSPSContext.java | 57 +---
 .../sps/ExternalStoragePolicySatisfier.java |  2 +-
 .../src/site/markdown/ArchivalStorage.md|  2 +-
 .../TestPersistentStoragePolicySatisfier.java   | 10 +++-
 ...stStoragePolicySatisfierWithStripedFile.java |  2 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  4 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |  2 +-
 18 files changed, 39 insertions(+), 240 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85405a66/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
index edfb391..bf0db6e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
@@ -187,6 +187,7 @@ public class RouterNamenodeProtocol implements NamenodeProtocol {
 
   @Override
   public Long getNextSPSPath() throws IOException {
+rpcServer.checkOperation(OperationCategory.READ, false);
 // not supported
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/85405a66/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 87bd155..d8a3aa3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4300,21 +4300,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Check file has low redundancy blocks.
-   */
-  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
-boolean result = false;
-for (BlockInfo block : bc.getBlocks()) {
-  short expected = getExpectedRedundancyNum(block);
-  final NumberReplicas n = countNodes(block);
-  if (expected > n.liveReplicas()) {
-result = true;
-  }
-}
-return result;
-  }
-
-  /**
* Check sufficient redundancy of the blocks in the collection. If any block
* is needed reconstruction, insert it into the reconstruction queue.
* Otherwise, if the block is more than the expected replication factor,
@@ -5011,25 +4996,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Check whether file id has low redundancy blocks.
-   *
-   * @param inodeID
-   *  - inode id
-   */
-  public boolean hasLowRedundancyBlocks(long inodeID) {
-namesystem.readLock();
-try {
-  BlockCollection bc = namesystem.getBlockCollection(inodeID);
-  if (bc == null) {
-