[47/50] [abbrv] hadoop git commit: HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for external/internal implementations. Contributed by Uma Maheswara Rao G

2018-01-23 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e280b90/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
index 2a7bde5..9354044 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -72,7 +72,6 @@ import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -147,12 +146,11 @@ public class TestStoragePolicySatisfier {
 startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
 storagesPerDatanode, capacity, hdfsCluster);
 
-dfs.satisfyStoragePolicy(new Path(file));
-
 hdfsCluster.triggerHeartbeats();
+dfs.satisfyStoragePolicy(new Path(file));
 // Wait till namenode notified about the block location details
-DFSTestUtil.waitExpectedStorageType(
-file, StorageType.ARCHIVE, 3, 3, dfs);
+DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 35000,
+dfs);
   }
 
   @Test(timeout = 30)
@@ -1284,6 +1282,7 @@ public class TestStoragePolicySatisfier {
 {StorageType.ARCHIVE, StorageType.SSD},
 {StorageType.DISK, StorageType.DISK}};
 config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+config.setInt(DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 10);
 hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
 storagesPerDatanode, capacity);
 dfs = hdfsCluster.getFileSystem();
@@ -1299,19 +1298,28 @@ public class TestStoragePolicySatisfier {
 
 //Queue limit can control the traverse logic to wait for some free
 //entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier sps = Mockito.mock(StoragePolicySatisfier.class);
-Mockito.when(sps.isRunning()).thenReturn(true);
-Context ctxt = Mockito.mock(Context.class);
-config.setInt(DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 10);
-Mockito.when(ctxt.getConf()).thenReturn(config);
-Mockito.when(ctxt.isRunning()).thenReturn(true);
-Mockito.when(ctxt.isInSafeMode()).thenReturn(false);
-Mockito.when(ctxt.isFileExist(Mockito.anyLong())).thenReturn(true);
-BlockStorageMovementNeeded movmentNeededQueue =
-new BlockStorageMovementNeeded(ctxt);
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(hdfsCluster.getNamesystem(),
+hdfsCluster.getNamesystem().getBlockManager(), sps) {
+  @Override
+  public boolean isInSafeMode() {
+return false;
+  }
+
+  @Override
+  public boolean isRunning() {
+return true;
+  }
+};
+
+FileIdCollector fileIDCollector =
+new IntraSPSNameNodeFileIdCollector(fsDir, sps);
+sps.init(ctxt, fileIDCollector, null);
+sps.getStorageMovementQueue().activate();
+
 INode rootINode = fsDir.getINode("/root");
-movmentNeededQueue.addToPendingDirQueue(rootINode.getId());
-movmentNeededQueue.init(fsDir);
+hdfsCluster.getNamesystem().getBlockManager()
+.addSPSPathId(rootINode.getId());
 
 //Wait for thread to reach U.
 Thread.sleep(1000);
@@ -1321,7 +1329,7 @@ public class TestStoragePolicySatisfier {
 // Remove 10 element and make queue free, So other traversing will start.
 for (int i = 0; i < 10; i++) {
   String path = expectedTraverseOrder.remove(0);
-  long trackId = movmentNeededQueue.get().getTrackId();
+  long trackId = sps.getStorageMovementQueue().get().getFileId();
   INode inode = fsDir.getInode(trackId);
   assertTrue("Failed to traverse tree, expected " + path + " but got "
   + inode.getFullPathName(), path.equals(inode.getFullPathName()));
@@ -1332,7 +1340,7 @@ public class TestStoragePolicySatisfier {
 // Check other element traversed in order and R,S should not be added in
 // queue which we already removed from expected list
 for (String path : expectedTraverseOrder) {
-  long trackId = movmentNeededQueue.get().getTrackId();
+  long trackId = sps.getStorageMovementQueue().get().getFileId();
   INode inode = fsDir.getInode(trackId);
   assertTrue("Failed to traverse tree, expected " + path + " but got "
   + inode.getFullPathName(), path.equals(inode.getFullPathName()));
@@ -1352,6 +1360,7 @@ public class 

[26/50] [abbrv] hadoop git commit: HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-12146. [SPS]: Fix 
TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks.
 Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6402d5d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6402d5d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6402d5d8

Branch: refs/heads/HDFS-10285
Commit: 6402d5d895c4825e9b3f0e24f8cc111b0c3481c6
Parents: f997f61
Author: Rakesh Radhakrishnan 
Authored: Mon Jul 17 22:40:03 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:49 2018 +0530

--
 .../server/namenode/TestStoragePolicySatisfier.java |  9 +
 .../TestStoragePolicySatisfierWithStripedFile.java  | 16 
 2 files changed, 13 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6402d5d8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index be7236b..10ceae7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -1025,12 +1025,13 @@ public class TestStoragePolicySatisfier {
   list.add(cluster.stopDataNode(0));
   list.add(cluster.stopDataNode(0));
   cluster.restartNameNodes();
-  cluster.restartDataNode(list.get(0), true);
-  cluster.restartDataNode(list.get(1), true);
+  cluster.restartDataNode(list.get(0), false);
+  cluster.restartDataNode(list.get(1), false);
   cluster.waitActive();
   fs.satisfyStoragePolicy(filePath);
-  Thread.sleep(3000 * 6);
-  cluster.restartDataNode(list.get(2), true);
+  DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+  StorageType.ARCHIVE, 2, 3, cluster.getFileSystem());
+  cluster.restartDataNode(list.get(2), false);
   DFSTestUtil.waitExpectedStorageType(filePath.toString(),
   StorageType.ARCHIVE, 3, 3, cluster.getFileSystem());
 } finally {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6402d5d8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index f905ead..c070113 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -308,8 +308,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
*/
   @Test(timeout = 30)
   public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
-// start 10 datanodes
-int numOfDatanodes = 10;
+// start 9 datanodes
+int numOfDatanodes = 9;
 int storagesPerDatanode = 2;
 long capacity = 20 * defaultStripeBlockSize;
 long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@@ -338,7 +338,6 @@ public class TestStoragePolicySatisfierWithStripedFile {
 {StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE},
-{StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE}})
 .storageCapacities(capacities)
 .build();
@@ -366,15 +365,16 @@ public class TestStoragePolicySatisfierWithStripedFile {
   }
   cluster.restartNameNodes();
   // Restart half datanodes
-  for (int i = 0; i < numOfDatanodes / 2; i++) {
-cluster.restartDataNode(list.get(i), true);
+  for (int i = 0; i < 5; i++) {
+cluster.restartDataNode(list.get(i), false);
   }
   cluster.waitActive();
   fs.satisfyStoragePolicy(fooFile);
-  Thread.sleep(3000 * 6);
+  DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
+  StorageType.ARCHIVE, 5, 3, cluster.getFileSystem());
 

[04/50] [abbrv] hadoop git commit: HDFS-11248: [SPS]: Handle partial block location movements. Contributed by Rakesh R

2018-01-23 Thread rakeshr
HDFS-11248: [SPS]: Handle partial block location movements. Contributed by 
Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/62e0251e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/62e0251e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/62e0251e

Branch: refs/heads/HDFS-10285
Commit: 62e0251ee0be029b9198408ced72e4c50197f428
Parents: d648d35
Author: Uma Maheswara Rao G 
Authored: Wed Dec 28 23:21:07 2016 -0800
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:10:06 2018 +0530

--
 .../datanode/BlockStorageMovementTracker.java   |  15 --
 .../datanode/StoragePolicySatisfyWorker.java|  15 +-
 .../BlockStorageMovementAttemptedItems.java | 206 +-
 .../server/namenode/StoragePolicySatisfier.java | 215 +--
 .../TestBlockStorageMovementAttemptedItems.java | 101 -
 .../namenode/TestStoragePolicySatisfier.java|  63 +-
 6 files changed, 454 insertions(+), 161 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e0251e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index 2de88fc..bd35b09 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -28,7 +28,6 @@ import java.util.concurrent.Future;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import 
org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementResult;
-import 
org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlockMovementStatus;
 import 
org.apache.hadoop.hdfs.server.datanode.StoragePolicySatisfyWorker.BlocksMovementsCompletionHandler;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -109,20 +108,6 @@ public class BlockStorageMovementTracker implements 
Runnable {
 }
   }
 
-  /**
-   * Mark as block movement failure for the given trackId and blockId.
-   *
-   * @param trackId tracking id
-   * @param blockId block id
-   */
-  void markBlockMovementFailure(long trackId, long blockId) {
-LOG.debug("Mark as block movement failure for the given "
-+ "trackId:{} and blockId:{}", trackId, blockId);
-BlockMovementResult result = new BlockMovementResult(trackId, blockId, 
null,
-BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_FAILURE);
-addMovementResultToTrackIdList(result);
-  }
-
   private List addMovementResultToTrackIdList(
   BlockMovementResult result) {
 long trackId = result.getTrackId();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/62e0251e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 19f3fe2..10adbfd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -154,18 +154,9 @@ public class StoragePolicySatisfyWorker {
   Collection blockMovingInfos) {
 LOG.debug("Received BlockMovingTasks {}", blockMovingInfos);
 for (BlockMovingInfo blkMovingInfo : blockMovingInfos) {
-  // Iterating backwards. This is to ensure that all the block src location
-  // which doesn't have a target node will be marked as failure before
-  // scheduling the block movement to valid target nodes.
-  for (int i = blkMovingInfo.getSources().length - 1; i >= 0; i--) {
-if (i >= blkMovingInfo.getTargets().length) {
-  // Since there is no target selected for scheduling the block,
-  // just mark this block storage movement as failure. Later, namenode
-  // can take action on this.
-  movementTracker.markBlockMovementFailure(trackID,
-  blkMovingInfo.getBlock().getBlockId());
-  continue;
-  

[14/50] [abbrv] hadoop git commit: HDFS-11338: [SPS]: Fix timeout issue in unit tests caused by longer NN down time. Contributed by Wei Zhou and Rakesh R

2018-01-23 Thread rakeshr
HDFS-11338: [SPS]: Fix timeout issue in unit tests caused by longer NN down 
time. Contributed by Wei Zhou and Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/db3cc98f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/db3cc98f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/db3cc98f

Branch: refs/heads/HDFS-10285
Commit: db3cc98fdf8be7241d120af71131cbcc62935e07
Parents: 46d56ba
Author: Uma Maheswara Rao G 
Authored: Tue Apr 11 14:25:01 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:05 2018 +0530

--
 .../server/blockmanagement/BlockManager.java| 13 +--
 .../BlockStorageMovementAttemptedItems.java | 25 +
 .../hdfs/server/namenode/FSNamesystem.java  |  2 +-
 .../server/namenode/StoragePolicySatisfier.java | 38 ++--
 .../TestBlockStorageMovementAttemptedItems.java |  3 +-
 5 files changed, 60 insertions(+), 21 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/db3cc98f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0c7b982..dd491cd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -735,7 +735,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   public void close() {
 if (sps != null) {
-  sps.stop(false);
+  sps.deactivate(false);
 }
 bmSafeMode.close();
 try {
@@ -750,6 +750,7 @@ public class BlockManager implements BlockStatsMXBean {
 datanodeManager.close();
 pendingReconstruction.stop();
 blocksMap.close();
+stopSPSGracefully();
   }
 
   /** @return the datanodeManager */
@@ -5060,10 +5061,18 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already stopped.");
   return;
 }
-sps.stop(true);
+sps.deactivate(true);
   }
 
   /**
+   * Timed wait to stop storage policy satisfier daemon threads.
+   */
+  public void stopSPSGracefully() {
+if (sps != null) {
+  sps.stopGracefully();
+}
+  }
+  /**
* @return True if storage policy satisfier running.
*/
   public boolean isStoragePolicySatisfierRunning() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/db3cc98f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index f15db73..26b98d8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -130,21 +130,34 @@ public class BlockStorageMovementAttemptedItems {
   }
 
   /**
-   * Stops the monitor thread.
+   * Sets running flag to false. Also, this will interrupt monitor thread and
+   * clear all the queued up tasks.
*/
-  public synchronized void stop() {
+  public synchronized void deactivate() {
 monitorRunning = false;
 if (timerThread != null) {
   timerThread.interrupt();
-  try {
-timerThread.join(3000);
-  } catch (InterruptedException ie) {
-  }
 }
 this.clearQueues();
   }
 
   /**
+   * Timed wait to stop monitor thread.
+   */
+  synchronized void stopGracefully() {
+if (timerThread == null) {
+  return;
+}
+if (monitorRunning) {
+  deactivate();
+}
+try {
+  timerThread.join(3000);
+} catch (InterruptedException ie) {
+}
+  }
+
+  /**
* This class contains information of an attempted trackID. Information such
* as, (a)last attempted time stamp, (b)whether all the blocks in the trackID
* were attempted and blocks movement has been scheduled to satisfy storage
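Editorially distilled from the deactivate()/stopGracefully() split above: deactivate() only flips the running flag and interrupts the monitor thread, while stopGracefully() adds the bounded join(3000). A minimal, hedged sketch of the same two-phase shutdown pattern in isolation (a plain Thread stands in for the Daemon; all names here are illustrative, not from the patch):

    public class GracefulStopSketch {
      private volatile boolean running = true;
      private Thread timerThread;

      void start() {
        timerThread = new Thread(() -> {
          while (running) {
            try {
              Thread.sleep(100); // stand-in for the periodic monitor work
            } catch (InterruptedException ie) {
              // interrupted by deactivate(); the loop re-checks the flag
            }
          }
        });
        timerThread.start();
      }

      synchronized void deactivate() { // signal only; do not wait here
        running = false;
        if (timerThread != null) {
          timerThread.interrupt();
        }
      }

      synchronized void stopGracefully() { // bounded wait, mirrors join(3000)
        if (timerThread == null) {
          return;
        }
        if (running) {
          deactivate();
        }
        try {
          timerThread.join(3000);
        } catch (InterruptedException ie) {
          Thread.currentThread().interrupt();
        }
      }
    }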


[46/50] [abbrv] hadoop git commit: HDFS-12982 : [SPS]: Reduce the locking and cleanup the Namesystem access. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
HDFS-12982 : [SPS]: Reduce the locking and cleanup the Namesystem access. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bcce97f8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bcce97f8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bcce97f8

Branch: refs/heads/HDFS-10285
Commit: bcce97f87a9aeb2cf20acef0cc3d22dab1f5120c
Parents: 343d9cbd
Author: Surendra Singh Lilhore 
Authored: Mon Jan 8 15:13:11 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 12:27:17 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  16 +-
 .../blockmanagement/DatanodeDescriptor.java |   2 +-
 .../server/blockmanagement/DatanodeManager.java |  22 ++
 .../server/namenode/FSDirStatAndListingOp.java  |   1 +
 .../hdfs/server/namenode/FSNamesystem.java  |  44 ++-
 .../hdfs/server/namenode/IntraNNSPSContext.java |  41 --
 .../hadoop/hdfs/server/namenode/Namesystem.java |  24 ++
 .../sps/BlockStorageMovementAttemptedItems.java |  17 +-
 .../sps/BlockStorageMovementNeeded.java |  48 ++-
 .../hdfs/server/namenode/sps/Context.java   | 181 +
 .../namenode/sps/IntraSPSNameNodeContext.java   | 220 +++
 .../namenode/sps/StoragePolicySatisfier.java| 374 +--
 .../TestBlockStorageMovementAttemptedItems.java |  17 +-
 .../sps/TestStoragePolicySatisfier.java |  25 +-
 14 files changed, 742 insertions(+), 290 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bcce97f8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 5ee869e..e97fb46 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,11 +89,12 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
-import org.apache.hadoop.hdfs.server.namenode.IntraNNSPSContext;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.sps.Context;
+import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -440,6 +441,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
+  private Context spsctxt = null;
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -485,8 +487,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-StoragePolicySatisfier.Context spsctxt = new IntraNNSPSContext(namesystem);
-sps = new StoragePolicySatisfier(namesystem, this, conf, spsctxt);
+spsctxt = new IntraSPSNameNodeContext(namesystem, this, conf);
+sps = new StoragePolicySatisfier(spsctxt);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5032,8 +5034,8 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-
-sps.start(false);
+// TODO: FSDirectory will get removed via HDFS-12911 modularization work
+sps.start(false, namesystem.getFSDirectory());
   }
 
   /**
@@ -5069,8 +5071,8 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-
-sps.start(true);
+// TODO: FSDirectory will get removed via HDFS-12911 

[31/50] [abbrv] hadoop git commit: HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy storage policy of all the files under the given dir. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2808be4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 3375590..57e9f94 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -21,6 +21,9 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KE
 import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.slf4j.LoggerFactory.getLogger;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
@@ -61,8 +64,10 @@ import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Assert;
 import org.junit.Test;
+import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
 
 import com.google.common.base.Supplier;
 
@@ -71,6 +76,12 @@ import com.google.common.base.Supplier;
  * moved and finding its suggested target locations to move.
  */
 public class TestStoragePolicySatisfier {
+
+  {
+GenericTestUtils.setLogLevel(
+getLogger(FSTreeTraverser.class), Level.DEBUG);
+  }
+
   private static final String ONE_SSD = "ONE_SSD";
   private static final String COLD = "COLD";
   private static final Logger LOG =
@@ -341,7 +352,9 @@ public class TestStoragePolicySatisfier {
 
   // take no effect for the sub-dir's file in the directory.
   DFSTestUtil.waitExpectedStorageType(
-  subFile2, StorageType.DEFAULT, 3, 3, dfs);
+  subFile2, StorageType.SSD, 1, 3, dfs);
+  DFSTestUtil.waitExpectedStorageType(
+  subFile2, StorageType.DISK, 2, 3, dfs);
 } finally {
   shutdownCluster();
 }
@@ -1083,6 +1096,368 @@ public class TestStoragePolicySatisfier {
 }
   }
 
+  /**
+   * Test SPS for empty directory, xAttr should be removed.
+   */
+  @Test(timeout = 30)
+  public void testSPSForEmptyDirectory() throws IOException, TimeoutException,
+  InterruptedException {
+MiniDFSCluster cluster = null;
+try {
+  Configuration conf = new Configuration();
+  conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+  true);
+  cluster = new MiniDFSCluster.Builder(conf).build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path emptyDir = new Path("/emptyDir");
+  fs.mkdirs(emptyDir);
+  fs.satisfyStoragePolicy(emptyDir);
+  // Make sure satisfy xattr has been removed.
+  DFSTestUtil.waitForXattrRemoved("/emptyDir",
+  XATTR_SATISFY_STORAGE_POLICY, cluster.getNamesystem(), 3);
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
+  /**
+   * Test SPS for a non-existent directory.
+   */
+  @Test(timeout = 30)
+  public void testSPSForNonExistDirectory() throws Exception {
+MiniDFSCluster cluster = null;
+try {
+  Configuration conf = new Configuration();
+  conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+  true);
+  cluster = new MiniDFSCluster.Builder(conf).build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path emptyDir = new Path("/emptyDir");
+  try {
+fs.satisfyStoragePolicy(emptyDir);
+fail("FileNotFoundException should throw");
+  } catch (FileNotFoundException e) {
+// nothing to do
+  }
+} finally {
+  if (cluster != null) {
+cluster.shutdown();
+  }
+}
+  }
+
+  /**
+   * Test SPS for directory tree which doesn't have files.
+   */
+  @Test(timeout = 30)
+  public void testSPSWithDirectoryTreeWithoutFile() throws Exception {
+MiniDFSCluster cluster = null;
+try {
+  Configuration conf = new Configuration();
+  conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+  true);
+  cluster = new MiniDFSCluster.Builder(conf).build();
+  cluster.waitActive();
+  // Create directories
+  /*
+   *   root
+   *|
+   *   ACD
+   *|
+   *   GHI
+   *|
+ 

[49/50] [abbrv] hadoop git commit: HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. Contributed by Uma Maheswara Rao G.

2018-01-23 Thread rakeshr
HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. 
Contributed by Uma Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6980058e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6980058e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6980058e

Branch: refs/heads/HDFS-10285
Commit: 6980058e1b44e2200301d6254eeb98f20ba91b14
Parents: 3e280b9
Author: Rakesh Radhakrishnan 
Authored: Tue Jan 23 20:09:26 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 12:27:42 2018 +0530

--
 .../sps/BlockStorageMovementNeeded.java |  70 +++-
 .../hdfs/server/namenode/sps/Context.java   |   8 +
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   2 +
 .../namenode/sps/IntraSPSNameNodeContext.java   |   7 +
 .../sps/IntraSPSNameNodeFileIdCollector.java|   6 +-
 .../hdfs/server/namenode/sps/SPSService.java|  10 +-
 .../namenode/sps/StoragePolicySatisfier.java|   8 +-
 .../server/sps/ExternalSPSFileIDCollector.java  | 156 +
 .../hadoop/hdfs/server/sps/package-info.java|  28 ++
 .../sps/TestStoragePolicySatisfier.java | 323 ++-
 .../sps/TestExternalStoragePolicySatisfier.java | 108 +++
 11 files changed, 556 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6980058e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index 39a0051..b141502 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -97,23 +97,53 @@ public class BlockStorageMovementNeeded {
   }
 
   /**
-   * Add the itemInfo to tracking list for which storage movement
-   * expected if necessary.
+   * Add the itemInfo list to tracking list for which storage movement expected
+   * if necessary.
+   *
* @param startId
-   *- start id
+   *  - start id
* @param itemInfoList
-   *- List of child in the directory
+   *  - List of child in the directory
+   * @param scanCompleted
+   *  -Indicates whether the start id directory has no more elements to
+   *  scan.
*/
   @VisibleForTesting
-  public synchronized void addAll(long startId,
-  List itemInfoList, boolean scanCompleted) {
+  public synchronized void addAll(long startId, List itemInfoList,
+  boolean scanCompleted) {
 storageMovementNeeded.addAll(itemInfoList);
+updatePendingDirScanStats(startId, itemInfoList.size(), scanCompleted);
+  }
+
+  /**
+   * Add the itemInfo to tracking list for which storage movement expected if
+   * necessary.
+   *
+   * @param itemInfoList
+   *  - List of child in the directory
+   * @param scanCompleted
+   *  -Indicates whether the ItemInfo start id directory has no more
+   *  elements to scan.
+   */
+  @VisibleForTesting
+  public synchronized void add(ItemInfo itemInfo, boolean scanCompleted) {
+storageMovementNeeded.add(itemInfo);
+// This represents sps start id is file, so no need to update pending dir
+// stats.
+if (itemInfo.getStartId() == itemInfo.getFileId()) {
+  return;
+}
+updatePendingDirScanStats(itemInfo.getStartId(), 1, scanCompleted);
+  }
+
+  private void updatePendingDirScanStats(long startId, int numScannedFiles,
+  boolean scanCompleted) {
 DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
 if (pendingWork == null) {
   pendingWork = new DirPendingWorkInfo();
   pendingWorkForDirectory.put(startId, pendingWork);
 }
-pendingWork.addPendingWorkCount(itemInfoList.size());
+pendingWork.addPendingWorkCount(numScannedFiles);
 if (scanCompleted) {
   pendingWork.markScanCompleted();
 }
@@ -250,13 +280,15 @@ public class BlockStorageMovementNeeded {
 
 @Override
 public void run() {
-  LOG.info("Starting FileInodeIdCollector!.");
+  LOG.info("Starting SPSPathIdProcessor!.");
   long lastStatusCleanTime = 0;
+  Long startINodeId = null;
   while (ctxt.isRunning()) {
-LOG.info("Running FileInodeIdCollector!.");
 try {
   if (!ctxt.isInSafeMode()) {
-Long startINodeId = 

[36/50] [abbrv] hadoop git commit: HDFS-12556: [SPS] : Block movement analysis should be done in read lock.

2018-01-23 Thread rakeshr
HDFS-12556: [SPS] : Block movement analysis should be done in read lock.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9318ad9b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9318ad9b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9318ad9b

Branch: refs/heads/HDFS-10285
Commit: 9318ad9b2a3ada4e9358e7d9ae4d39748876936f
Parents: 8fb4a3d
Author: Surendra Singh Lilhore 
Authored: Sat Oct 14 15:11:26 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:23:21 2018 +0530

--
 .../server/namenode/StoragePolicySatisfier.java | 27 +---
 .../TestPersistentStoragePolicySatisfier.java   |  2 +-
 2 files changed, 19 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9318ad9b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a28a806..cbfba44 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -242,12 +242,25 @@ public class StoragePolicySatisfier implements Runnable {
   ItemInfo itemInfo = storageMovementNeeded.get();
   if (itemInfo != null) {
 long trackId = itemInfo.getTrackId();
-BlockCollection blockCollection =
-namesystem.getBlockCollection(trackId);
-// Check blockCollectionId existence.
+BlockCollection blockCollection;
+BlocksMovingAnalysis status = null;
+try {
+  namesystem.readLock();
+  blockCollection = namesystem.getBlockCollection(trackId);
+  // Check blockCollectionId existence.
+  if (blockCollection == null) {
+// File doesn't exists (maybe got deleted), remove trackId from
+// the queue
+storageMovementNeeded.removeItemTrackInfo(itemInfo);
+  } else {
+status =
+analyseBlocksStorageMovementsAndAssignToDN(
+blockCollection);
+  }
+} finally {
+  namesystem.readUnlock();
+}
 if (blockCollection != null) {
-  BlocksMovingAnalysis status =
-  analyseBlocksStorageMovementsAndAssignToDN(blockCollection);
   switch (status.status) {
   // Just add to monitor, so it will be retried after timeout
   case ANALYSIS_SKIPPED_FOR_RETRY:
@@ -283,10 +296,6 @@ public class StoragePolicySatisfier implements Runnable {
 storageMovementNeeded.removeItemTrackInfo(itemInfo);
 break;
   }
-} else {
-  // File doesn't exists (maybe got deleted), remove trackId from
-  // the queue
-  storageMovementNeeded.removeItemTrackInfo(itemInfo);
 }
   }
 }
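The hunk above moves both the block-collection lookup and the movement analysis under a single namesystem read lock, so the file cannot be mutated or deleted between the two steps. A hedged stand-in for the same pattern, using ReentrantReadWriteLock in place of the FSNamesystem lock API (class and return values are illustrative):

    import java.util.Map;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class ReadLockedAnalysisSketch {
      private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

      String analyse(Map<Long, String> blockCollections, long trackId) {
        lock.readLock().lock();
        try {
          String bc = blockCollections.get(trackId);
          if (bc == null) {
            return "REMOVED"; // file deleted: caller drops trackId from queue
          }
          // Lookup and analysis share one critical section, so the state
          // seen by the analysis is exactly the state that was looked up.
          return "ANALYSED:" + bc;
        } finally {
          lock.readLock().unlock();
        }
      }
    }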

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9318ad9b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 5bce296..7165d06 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -72,7 +72,7 @@ public class TestPersistentStoragePolicySatisfier {
   {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD}
   };
 
-  private final int timeout = 30;
+  private final int timeout = 9;
 
   /**
* Setup environment for every test case.





[42/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/343d9cbd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
deleted file mode 100644
index 6991ad2..0000000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ /dev/null
@@ -1,580 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests that StoragePolicySatisfier daemon is able to check the striped blocks
- * to be moved and finding its expected target locations in order to satisfy 
the
- * storage policy.
- */
-public class TestStoragePolicySatisfierWithStripedFile {
-
-  private static final Logger LOG = LoggerFactory
-  .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
-
-  private final int stripesPerBlock = 2;
-
-  private ErasureCodingPolicy ecPolicy;
-  private int dataBlocks;
-  private int parityBlocks;
-  private int cellSize;
-  private int defaultStripeBlockSize;
-
-  private ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
-  }
-
-  /**
-   * Initialize erasure coding policy.
-   */
-  @Before
-  public void init(){
-ecPolicy = getEcPolicy();
-dataBlocks = ecPolicy.getNumDataUnits();
-parityBlocks = ecPolicy.getNumParityUnits();
-cellSize = ecPolicy.getCellSize();
-defaultStripeBlockSize = cellSize * stripesPerBlock;
-  }
-
-  /**
-   * Tests to verify that all the striped blocks(data + parity blocks) are
-   * moving to satisfy the storage policy.
-   */
-  @Test(timeout = 30)
-  public void testMoverWithFullStripe() throws Exception {
-// start 10 datanodes
-int numOfDatanodes = 10;
-int storagesPerDatanode = 2;
-long capacity = 20 * defaultStripeBlockSize;
-long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
-for (int i = 0; i < numOfDatanodes; i++) {
-  for (int j = 0; j < storagesPerDatanode; j++) {
-capacities[i][j] = capacity;
-  }
-}
-
-final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
-initConfWithStripe(conf, defaultStripeBlockSize);
-final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-.numDataNodes(numOfDatanodes)
-.storagesPerDatanode(storagesPerDatanode)
-   

[25/50] [abbrv] hadoop git commit: HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G

2018-01-23 Thread rakeshr
HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f997f612
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f997f612
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f997f612

Branch: refs/heads/HDFS-10285
Commit: f997f612160be4e13b8633517f82c677bd7a566f
Parents: d770a2f
Author: Rakesh Radhakrishnan 
Authored: Fri Jul 14 22:36:09 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:45 2018 +0530

--
 .../src/site/markdown/ArchivalStorage.md| 51 ++--
 1 file changed, 48 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f997f612/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index f1895fc..668bb20 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -95,8 +95,44 @@ The effective storage policy can be retrieved by the 
"[`storagepolicies -getStor
 
 The default storage type of a datanode storage location will be DISK if it 
does not have a storage type tagged explicitly.
 
-Mover - A New Data Migration Tool
--
+Storage Policy Based Data Movement
+--
+
+Setting a new storage policy on an already existing file/dir will change the 
policy in the Namespace, but it will not move the blocks physically across 
storage media.
+The following two options allow users to move the blocks based on the newly 
set policy. So, once a user changes/sets a new policy on a file/directory, the 
user should also perform one of the following options to achieve the desired 
data movement. Note that both options cannot be run simultaneously.
+
+### Storage Policy Satisfier (SPS)
+
When a user changes the storage policy on a file/directory, the user can call 
the `HdfsAdmin` API `satisfyStoragePolicy()` to move the blocks as per the 
newly set policy. The SPS daemon thread runs along with the namenode and 
periodically scans for storage mismatches between the newly set policy and the 
physical placement of blocks. It tracks only the files/directories for which 
the user invoked satisfyStoragePolicy. If SPS identifies blocks to be moved 
for a file, it schedules block movement tasks to datanodes. A Coordinator 
DataNode (C-DN) tracks all block movements associated with a file and notifies 
the namenode about movement success/failure. If any movement fails, the SPS 
re-attempts it by sending a new block movement task.
+
+SPS can be activated and deactivated dynamically without restarting the 
Namenode.
+
+Detailed design documentation can be found at [Storage Policy Satisfier(SPS) 
(HDFS-10285)](https://issues.apache.org/jira/browse/HDFS-10285)
+
* **Note**: When a user invokes the `satisfyStoragePolicy()` API on a 
directory, SPS considers only the files immediately under that directory. 
Sub-directories won't be considered for satisfying the policy. It is the 
user's responsibility to call this API on directories recursively, to track 
all files under the sub tree.
+
+* HdfsAdmin API :
+`public void satisfyStoragePolicy(final Path path) throws IOException`
+
+* Arguments :
+
+| | |
+|: |: |
+| `path` | A path which requires blocks storage movement. |
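
A hedged usage sketch of the API described above (not part of this patch); the 
NameNode URI, the path, and the policy name are illustrative assumptions:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class SatisfyPolicySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        HdfsAdmin admin = new HdfsAdmin(new URI("hdfs://namenode:8020"), conf);
        Path dir = new Path("/archive/reports"); // hypothetical directory
        admin.setStoragePolicy(dir, "COLD");     // change the policy first
        admin.satisfyStoragePolicy(dir);         // then ask SPS to move blocks
      }
    }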
+
+Configurations:
+
*   **dfs.storage.policy.satisfier.activate** - Used to activate or deactivate 
SPS. Setting it to true activates SPS; setting it to false deactivates it.

*   **dfs.storage.policy.satisfier.recheck.timeout.millis** - A timeout for 
re-checking the processed block storage movement command results from the 
Co-ordinator Datanode.

*   **dfs.storage.policy.satisfier.self.retry.timeout.millis** - A timeout to 
retry if no block movement results are reported by the Co-ordinator Datanode 
within this configured interval.
+
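A hedged sketch of setting these properties programmatically; the values shown 
are the defaults introduced by HDFS-11289 elsewhere in this series, and the 
snippet itself is illustrative rather than part of the patch:

    import org.apache.hadoop.conf.Configuration;

    public class SpsConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.storage.policy.satisfier.activate", true);
        conf.setInt("dfs.storage.policy.satisfier.recheck.timeout.millis",
            5 * 60 * 1000);   // 5 minutes
        conf.setInt("dfs.storage.policy.satisfier.self.retry.timeout.millis",
            30 * 60 * 1000);  // 30 minutes
      }
    }
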
+### Mover - A New Data Migration Tool
 
 A new data migration tool is added for archiving data. The tool is similar to 
Balancer. It periodically scans the files in HDFS to check if the block 
placement satisfies the storage policy. For the blocks violating the storage 
policy, it moves the replicas to a different storage type in order to fulfill 
the storage policy requirement. Note that it always tries to move block 
replicas within the same node whenever possible. If that is not possible (e.g. 
when a node doesn’t have the target storage type) then it will copy the block 
replicas to another node over the network.
 
@@ 

[16/50] [abbrv] hadoop git commit: HDFS-11572. [SPS]: SPS should clean Xattrs when no blocks required to satisfy for a file. Contributed by Uma Maheswara Rao G

2018-01-23 Thread rakeshr
HDFS-11572. [SPS]: SPS should clean Xattrs when no blocks required to satisfy 
for a file. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/84db58f6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/84db58f6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/84db58f6

Branch: refs/heads/HDFS-10285
Commit: 84db58f6f86695c0964a7b1a16437f603ed35ce2
Parents: dc2bb36
Author: Rakesh Radhakrishnan 
Authored: Thu Apr 20 23:14:36 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:11 2018 +0530

--
 .../BlockStorageMovementAttemptedItems.java |   2 +-
 .../server/namenode/StoragePolicySatisfier.java | 116 ++-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  35 ++
 .../TestPersistentStoragePolicySatisfier.java   |  52 +
 .../namenode/TestStoragePolicySatisfier.java|  76 
 5 files changed, 225 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/84db58f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index f2406da..bf7859c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -333,7 +333,7 @@ public class BlockStorageMovementAttemptedItems {
   + "doesn't exists in storageMovementAttemptedItems list",
   storageMovementAttemptedResult.getTrackId());
   // Remove xattr for the track id.
-  this.sps.notifyBlkStorageMovementFinished(
+  this.sps.postBlkStorageMovementCleanup(
   storageMovementAttemptedResult.getTrackId());
 }
 break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/84db58f6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 8be0a2a..3b20314 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -79,6 +79,27 @@ public class StoragePolicySatisfier implements Runnable {
   private final BlockStorageMovementAttemptedItems storageMovementsMonitor;
   private volatile boolean isRunning = false;
 
+  /**
+   * Represents the collective analysis status for all blocks.
+   */
+  private enum BlocksMovingAnalysisStatus {
+// Represents that the analysis was skipped due to some condition; one such
+// condition is if the block collection is in an incomplete state.
+ANALYSIS_SKIPPED_FOR_RETRY,
+// Represents that, all block storage movement needed blocks found its
+// targets.
+ALL_BLOCKS_TARGETS_PAIRED,
+// Represents that, only fewer or none of the block storage movement needed
+// block found its eligible targets.
+FEW_BLOCKS_TARGETS_PAIRED,
+// Represents that, none of the blocks found for block storage movements.
+BLOCKS_ALREADY_SATISFIED,
+// Represents that, the analysis skipped due to some conditions.
+// Example conditions are if no blocks really exists in block collection or
+// if analysis is not required on ec files with unsuitable storage policies
+BLOCKS_TARGET_PAIRING_SKIPPED;
+  }
+
   public StoragePolicySatisfier(final Namesystem namesystem,
   final BlockStorageMovementNeeded storageMovementNeeded,
   final BlockManager blkManager, Configuration conf) {
@@ -208,10 +229,31 @@ public class StoragePolicySatisfier implements Runnable {
 namesystem.getBlockCollection(blockCollectionID);
 // Check blockCollectionId existence.
 if (blockCollection != null) {
-  boolean allBlockLocsAttemptedToSatisfy =
-  
computeAndAssignStorageMismatchedBlocksToDNs(blockCollection);
-  

[06/50] [abbrv] hadoop git commit: HDFS-11289. [SPS]: Make SPS movement monitor timeouts configurable. Contributed by Uma Maheswara Rao G

2018-01-23 Thread rakeshr
HDFS-11289. [SPS]: Make SPS movement monitor timeouts configurable. Contributed 
by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/364729b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/364729b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/364729b3

Branch: refs/heads/HDFS-10285
Commit: 364729b3f9c4507c5d50456902d183ba2599e354
Parents: 357c99c
Author: Rakesh Radhakrishnan 
Authored: Mon Jan 9 19:07:43 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:10:12 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  9 
 .../server/blockmanagement/BlockManager.java|  4 ++--
 .../BlockStorageMovementAttemptedItems.java | 10 -
 .../server/namenode/StoragePolicySatisfier.java | 15 -
 .../src/main/resources/hdfs-default.xml | 23 
 5 files changed, 49 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/364729b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 8ffcefe..3d54706 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -597,10 +597,19 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String  DFS_MOVER_MAX_NO_MOVE_INTERVAL_KEY = 
"dfs.mover.max-no-move-interval";
   public static final intDFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; 
// One minute
 
+  // SPS related configurations
   public static final String  DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
   "dfs.storage.policy.satisfier.activate";
   public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
   true;
+  public static final String 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
+  "dfs.storage.policy.satisfier.recheck.timeout.millis";
+  public static final int 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =
+  5 * 60 * 1000;
+  public static final String 
DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY =
+  "dfs.storage.policy.satisfier.self.retry.timeout.millis";
+  public static final int 
DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT =
+  30 * 60 * 1000;
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = 
"dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/364729b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c68b86a..f108c9d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -485,8 +485,8 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT);
 if (storagePolicyEnabled && spsEnabled) {
-  sps = new StoragePolicySatisfier(namesystem,
-  storageMovementNeeded, this);
+  sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
+  conf);
 } else {
   sps = null;
   LOG.warn(

http://git-wip-us.apache.org/repos/asf/hadoop/blob/364729b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index ce97075..042aca3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ 

[02/50] [abbrv] hadoop git commit: HDFS-11123. [SPS] Make storage policy satisfier daemon work on/off dynamically. Contributed by Uma Maheswara Rao G

2018-01-23 Thread rakeshr
HDFS-11123. [SPS] Make storage policy satisfier daemon work on/off dynamically. 
Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab9525f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab9525f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab9525f4

Branch: refs/heads/HDFS-10285
Commit: ab9525f4f8727a74098ad00e2ce626a410c90199
Parents: 81a3426
Author: Rakesh Radhakrishnan 
Authored: Wed Dec 14 17:49:44 2016 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:09:59 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  45 +
 .../BlockStorageMovementAttemptedItems.java |  24 +++--
 .../namenode/BlockStorageMovementNeeded.java|   4 +
 .../hdfs/server/namenode/FSDirAttrOp.java   |   8 --
 .../hdfs/server/namenode/FSNamesystem.java  |  35 ++-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  26 -
 .../hdfs/server/namenode/NameNodeRpcServer.java |   4 +-
 .../server/namenode/StoragePolicySatisfier.java |  45 +++--
 .../src/main/resources/hdfs-default.xml |   7 +-
 .../namenode/TestNameNodeReconfigure.java   | 100 +++
 .../apache/hadoop/hdfs/tools/TestDFSAdmin.java  |   2 +-
 11 files changed, 265 insertions(+), 35 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab9525f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1cac1cc..c68b86a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -5019,7 +5019,52 @@ public class BlockManager implements BlockStatsMXBean {
 }
   }
 
+  /**
+   * Gets the storage policy satisfier instance.
+   *
+   * @return sps
+   */
   public StoragePolicySatisfier getStoragePolicySatisfier() {
 return sps;
   }
+
+  /**
+   * Activate the storage policy satisfier by starting its service.
+   */
+  public void activateSPS() {
+if (sps == null) {
+  LOG.info("Storage policy satisfier is not initialized.");
+  return;
+} else if (sps.isRunning()) {
+  LOG.info("Storage policy satisfier is already running.");
+  return;
+}
+sps.start();
+  }
+
+  /**
+   * Deactivate the storage policy satisfier by stopping its services.
+   */
+  public void deactivateSPS() {
+if (sps == null) {
+  LOG.info("Storage policy satisfier is not initialized.");
+  return;
+} else if (!sps.isRunning()) {
+  LOG.info("Storage policy satisfier is already stopped.");
+  return;
+}
+sps.stop();
+// TODO: add a command to DNs to stop processing in-progress SPS commands?
+// To avoid confusion in the cluster, sending such a command from a
+// centralized place to drop the pending queues at the DNs would be better.
+// In-progress work will finish shortly anyway, but this command can avoid
+// starting fresh movements at the DNs.
+  }
+
+  /**
+   * @return true if the storage policy satisfier is running.
+   */
+  public boolean isStoragePolicySatisfierRunning() {
+return sps == null ? false : sps.isRunning();
+  }
 }
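A minimal sketch of how a reconfiguration hook could drive the two new methods.
The wrapper class, method and flag name here are assumptions; only the
activateSPS()/deactivateSPS() calls come from the patch:

import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;

class SpsToggleSketch {
  // Hypothetical reconfiguration hook, not part of the patch.
  static void onSpsReconfigured(BlockManager blockManager, boolean enable) {
    if (enable) {
      blockManager.activateSPS();   // no-op (with a log) if already running
    } else {
      blockManager.deactivateSPS(); // no-op (with a log) if already stopped
    }
  }
}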

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab9525f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index 5457dc2..bb26082 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -49,7 +49,7 @@ public class BlockStorageMovementAttemptedItems {
   // processing and sent to DNs.
   private final Map storageMovementAttemptedItems;
  private final List storageMovementAttemptedResults;
-  private volatile boolean spsRunning = true;
+  private volatile boolean monitorRunning = true;
   private Daemon timerThread = null;
   

[45/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/343d9cbd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/343d9cbd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/343d9cbd

Branch: refs/heads/HDFS-10285
Commit: 343d9cbdca78698b443ba7f924b85cd1a059784f
Parents: 1fd6f96
Author: Uma Maheswara Rao G 
Authored: Fri Dec 22 09:10:12 2017 -0800
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:32:31 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|6 +-
 .../BlockStorageMovementAttemptedItems.java |  241 ---
 .../namenode/BlockStorageMovementNeeded.java|  574 --
 .../hdfs/server/namenode/FSNamesystem.java  |1 +
 .../hdfs/server/namenode/IntraNNSPSContext.java |   41 +
 .../server/namenode/StoragePolicySatisfier.java |  973 --
 .../sps/BlockStorageMovementAttemptedItems.java |  241 +++
 .../sps/BlockStorageMovementNeeded.java |  572 ++
 .../namenode/sps/StoragePolicySatisfier.java|  988 ++
 .../hdfs/server/namenode/sps/package-info.java  |   28 +
 .../TestBlockStorageMovementAttemptedItems.java |  196 --
 .../namenode/TestStoragePolicySatisfier.java| 1775 -
 ...stStoragePolicySatisfierWithStripedFile.java |  580 --
 .../TestBlockStorageMovementAttemptedItems.java |  196 ++
 .../sps/TestStoragePolicySatisfier.java | 1779 ++
 ...stStoragePolicySatisfierWithStripedFile.java |  580 ++
 16 files changed, 4430 insertions(+), 4341 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/343d9cbd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f9925bc..5ee869e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,11 +89,12 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.IntraNNSPSContext;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -484,7 +485,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-sps = new StoragePolicySatisfier(namesystem, this, conf);
+StoragePolicySatisfier.Context spsctxt = new IntraNNSPSContext(namesystem);
+sps = new StoragePolicySatisfier(namesystem, this, conf, spsctxt);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/343d9cbd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
deleted file mode 100644
index 643255f..000
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See 

[41/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/343d9cbd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
new file mode 100644
index 000..8dc52dc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -0,0 +1,1779 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that the StoragePolicySatisfier daemon is able to identify the blocks
+ * to be moved and to find suggested target locations for the move.
+ */
+public class TestStoragePolicySatisfier {
+
+  {
+GenericTestUtils.setLogLevel(
+getLogger(FSTreeTraverser.class), Level.DEBUG);
+  }
+
+  private static final String ONE_SSD = "ONE_SSD";
+  private static final String COLD = "COLD";
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
+  private final Configuration config = new HdfsConfiguration();
+  private StorageType[][] allDiskTypes =
+ 

[08/50] [abbrv] hadoop git commit: HDFS-11150: [SPS]: Provide persistence when satisfying storage policy. Contributed by Yuanbo Liu

2018-01-23 Thread rakeshr
HDFS-11150: [SPS]: Provide persistence when satisfying storage policy. Contributed by Yuanbo Liu


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4be29a6c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4be29a6c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4be29a6c

Branch: refs/heads/HDFS-10285
Commit: 4be29a6c04be700b9d5c2786313a79c74bb946bb
Parents: c58ca3a
Author: Uma Maheswara Rao G 
Authored: Wed Jan 11 13:48:58 2017 -0800
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:10:19 2018 +0530

--
 .../hadoop/hdfs/protocol/ClientProtocol.java|   2 +-
 .../hdfs/server/common/HdfsServerConstants.java |   3 +
 .../hdfs/server/namenode/FSDirAttrOp.java   |  81 +++--
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   8 +
 .../hdfs/server/namenode/FSDirectory.java   |  14 +
 .../hdfs/server/namenode/FSNamesystem.java  |   6 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 +-
 .../server/namenode/StoragePolicySatisfier.java |  22 +-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  37 +++
 .../TestPersistentStoragePolicySatisfier.java   | 311 +++
 .../namenode/TestStoragePolicySatisfier.java| 112 +++
 ...stStoragePolicySatisfierWithStripedFile.java |  17 +-
 12 files changed, 532 insertions(+), 94 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4be29a6c/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index b2f5df4..0a2404c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1743,7 +1743,7 @@ public interface ClientProtocol {
* @throws org.apache.hadoop.hdfs.server.namenode.SafeModeException append not
*   allowed in safemode.
*/
-  @Idempotent
+  @AtMostOnce
   void satisfyStoragePolicy(String path) throws IOException;
 
   /**

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4be29a6c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
index e486317..42a2fc6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
@@ -365,6 +365,9 @@ public interface HdfsServerConstants {
   String XATTR_ERASURECODING_POLICY =
   "system.hdfs.erasurecoding.policy";
 
+  String XATTR_SATISFY_STORAGE_POLICY =
+  "system.hdfs.satisfy.storage.policy";
+
   Path MOVER_ID_PATH = new Path("/system/mover.id");
 
   long BLOCK_GROUP_INDEX_MASK = 15;
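The new xattr name above is what carries a pending satisfy-storage-policy
request across NameNode restarts. A sketch (illustrative; the wrapper class is
an assumption, while the constant and XAttrHelper come from HDFS) of building
that xattr:

import org.apache.hadoop.fs.XAttr;
import org.apache.hadoop.hdfs.XAttrHelper;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;

class SatisfyXAttrSketch {
  // Builds the system xattr that marks an inode as pending SPS work.
  static XAttr buildSatisfyXAttr() {
    return XAttrHelper.buildXAttr(
        HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY);
  }
}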

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4be29a6c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 448a305..518c17e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -42,12 +43,14 @@ import com.google.common.collect.Lists;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
 

[21/50] [abbrv] hadoop git commit: HDFS-11966. [SPS] Correct the log in BlockStorageMovementAttemptedItems#blockStorageMovementResultCheck. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-11966. [SPS] Correct the log in BlockStorageMovementAttemptedItems#blockStorageMovementResultCheck. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/488da8d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/488da8d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/488da8d3

Branch: refs/heads/HDFS-10285
Commit: 488da8d34968d2cc6618d0e26e03257d5c1fce08
Parents: 2016003
Author: Rakesh Radhakrishnan 
Authored: Sun Jun 18 11:00:28 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:29 2018 +0530

--
 .../BlockStorageMovementAttemptedItems.java | 39 ++--
 1 file changed, 20 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/488da8d3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index bf7859c..6048986 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -296,19 +296,17 @@ public class BlockStorageMovementAttemptedItems {
 .next();
 synchronized (storageMovementAttemptedItems) {
   Status status = storageMovementAttemptedResult.getStatus();
+  long trackId = storageMovementAttemptedResult.getTrackId();
   ItemInfo itemInfo;
   switch (status) {
   case FAILURE:
-blockStorageMovementNeeded
-.add(storageMovementAttemptedResult.getTrackId());
+blockStorageMovementNeeded.add(trackId);
 LOG.warn("Blocks storage movement results for the tracking id: {}"
 + " is reported from co-ordinating datanode, but result"
-+ " status is FAILURE. So, added for retry",
-storageMovementAttemptedResult.getTrackId());
++ " status is FAILURE. So, added for retry", trackId);
 break;
   case SUCCESS:
-itemInfo = storageMovementAttemptedItems
-.get(storageMovementAttemptedResult.getTrackId());
+itemInfo = storageMovementAttemptedItems.get(trackId);
 
 // ItemInfo could be null. One case is, before the blocks movements
 // result arrives the attempted trackID became timed out and then
@@ -318,20 +316,23 @@ public class BlockStorageMovementAttemptedItems {
 // following condition. If all the block locations under the trackID
 // are attempted and failed to find matching target nodes to satisfy
 // storage policy in previous SPS iteration.
-if (itemInfo != null
-&& !itemInfo.isAllBlockLocsAttemptedToSatisfy()) {
-  blockStorageMovementNeeded
-  .add(storageMovementAttemptedResult.getTrackId());
-  LOG.warn("Blocks storage movement is SUCCESS for the track id: {}"
-  + " reported from co-ordinating datanode. But adding trackID"
-  + " back to retry queue as some of the blocks couldn't find"
-  + " matching target nodes in previous SPS iteration.",
-  storageMovementAttemptedResult.getTrackId());
+String msg = "Blocks storage movement is SUCCESS for the track id: "
++ trackId + " reported from co-ordinating datanode.";
+if (itemInfo != null) {
+  if (!itemInfo.isAllBlockLocsAttemptedToSatisfy()) {
+blockStorageMovementNeeded.add(trackId);
+LOG.warn("{} But adding trackID back to retry queue as some of"
++ " the blocks couldn't find matching target nodes in"
++ " previous SPS iteration.", msg);
+  } else {
+LOG.info(msg);
+// Remove xattr for the track id.
+this.sps.postBlkStorageMovementCleanup(
+storageMovementAttemptedResult.getTrackId());
+  }
 } else {
-  LOG.info("Blocks storage movement is SUCCESS for the track id: {}"
-  + " reported from co-ordinating datanode. But the trackID "
-  + "doesn't exists in storageMovementAttemptedItems 

[09/50] [abbrv] hadoop git commit: HDFS-11186. [SPS]: Daemon thread of SPS should start only in Active NN. Contributed by Wei Zhou

2018-01-23 Thread rakeshr
HDFS-11186. [SPS]: Daemon thread of SPS should start only in Active NN. Contributed by Wei Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/21972d51
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/21972d51
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/21972d51

Branch: refs/heads/HDFS-10285
Commit: 21972d518b27737c8d743bd785af6eb8850b8fe2
Parents: 4be29a6
Author: Rakesh Radhakrishnan 
Authored: Thu Jan 12 09:01:30 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:12:46 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|   9 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |  11 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   6 +
 .../hadoop/hdfs/server/namenode/NameNode.java   |   7 ++
 .../TestStoragePolicySatisfierWithHA.java   | 109 +++
 5 files changed, 138 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/21972d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f108c9d..cbedae1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -447,9 +447,15 @@ public class BlockManager implements BlockStatsMXBean {
   /** Storages accessible from multiple DNs. */
   private final ProvidedStorageMap providedStorageMap;
 
+  /**
+   * Whether HA is enabled.
+   */
+  private final boolean haEnabled;
+
   public BlockManager(final Namesystem namesystem, boolean haEnabled,
   final Configuration conf) throws IOException {
 this.namesystem = namesystem;
+this.haEnabled = haEnabled;
 datanodeManager = new DatanodeManager(this, namesystem, conf);
 heartbeatManager = datanodeManager.getHeartbeatManager();
 this.blockIdManager = new BlockIdManager(this);
@@ -722,7 +728,7 @@ public class BlockManager implements BlockStatsMXBean {
 this.blockReportThread.start();
 mxBeanName = MBeans.register("NameNode", "BlockStats", this);
 bmSafeMode.activate(blockTotal);
-if (sps != null) {
+if (sps != null && !haEnabled) {
   sps.start();
 }
   }
@@ -5039,6 +5045,7 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
+
 sps.start();
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/21972d51/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
index ce78bde..b4e9716 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/mover/Mover.java
@@ -48,6 +48,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeStorageReport;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
@@ -661,9 +662,13 @@ public class Mover {
   try {
 spsRunning = nnc.getDistributedFileSystem().getClient()
 .isStoragePolicySatisfierRunning();
-  } catch (StandbyException e) {
-System.err.println("Skip Standby Namenode. " + nnc.toString());
-continue;
+  } catch (RemoteException e) {
+IOException cause = e.unwrapRemoteException();
+if (cause instanceof StandbyException) {
+  System.err.println("Skip Standby Namenode. " + nnc.toString());
+  continue;
+}
+throw e;
   }
   if (spsRunning) {
 System.err.println("Mover failed due to StoragePolicySatisfier"
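The fix above matters because RPC failures reach the client wrapped in
RemoteException, so the real cause must be unwrapped before it can be matched
against StandbyException. A sketch of the resulting probe pattern
(illustrative; the wrapper class is an assumption, the calls are from HDFS):

import java.io.IOException;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.StandbyException;

class SpsProbeSketch {
  // Returns false for a standby NN instead of failing; rethrows anything else.
  static boolean spsRunning(DistributedFileSystem dfs) throws IOException {
    try {
      return dfs.getClient().isStoragePolicySatisfierRunning();
    } catch (RemoteException e) {
      IOException cause = e.unwrapRemoteException();
      if (cause instanceof StandbyException) {
        return false; // caller should try the next NameNode
      }
      throw e;
    }
  }
}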


[20/50] [abbrv] hadoop git commit: HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type as source and destination in same datanode. Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type as source and destination in same datanode. Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2016003a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2016003a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2016003a

Branch: refs/heads/HDFS-10285
Commit: 2016003a63c3cd33bc51e5d78645b231632b8ba3
Parents: 94c1f0f
Author: Rakesh Radhakrishnan 
Authored: Fri Jun 9 14:03:13 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:26 2018 +0530

--
 .../server/namenode/StoragePolicySatisfier.java | 23 ++
 .../namenode/TestStoragePolicySatisfier.java| 44 
 2 files changed, 58 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2016003a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 9e2a4a0..1b2afa3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -501,15 +501,20 @@ public class StoragePolicySatisfier implements Runnable {
 // avoid choosing a target which already has this block.
 for (int i = 0; i < sourceWithStorageList.size(); i++) {
   StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i);
-  StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(blockInfo,
-  existingTypeNodePair.dn, expected);
-  if (chosenTarget != null) {
-sourceNodes.add(existingTypeNodePair.dn);
-sourceStorageTypes.add(existingTypeNodePair.storageType);
-targetNodes.add(chosenTarget.dn);
-targetStorageTypes.add(chosenTarget.storageType);
-expected.remove(chosenTarget.storageType);
-// TODO: We can increment scheduled block count for this node?
+
+  // Check whether the block replica is already placed in the expected
+  // storage type in this source datanode.
+  if (!expected.contains(existingTypeNodePair.storageType)) {
+StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(
+blockInfo, existingTypeNodePair.dn, expected);
+if (chosenTarget != null) {
+  sourceNodes.add(existingTypeNodePair.dn);
+  sourceStorageTypes.add(existingTypeNodePair.storageType);
+  targetNodes.add(chosenTarget.dn);
+  targetStorageTypes.add(chosenTarget.storageType);
+  expected.remove(chosenTarget.storageType);
+  // TODO: We can increment scheduled block count for this node?
+}
   }
   // To avoid choosing this excludeNodes as targets later
   excludeNodes.add(existingTypeNodePair.dn);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2016003a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 8e08a1e..f1a4169 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -764,6 +764,50 @@ public class TestStoragePolicySatisfier {
   }
 
   /**
+   * If a replica with the expected storage type already exists in the source
+   * DN, then that DN should be skipped.
+   */
+  @Test(timeout = 30)
+  public void testSPSWhenReplicaWithExpectedStorageAlreadyAvailableInSource()
+  throws Exception {
+StorageType[][] diskTypes = new StorageType[][] {
+{StorageType.DISK, StorageType.ARCHIVE},
+{StorageType.DISK, StorageType.ARCHIVE},
+{StorageType.DISK, StorageType.ARCHIVE}};
+
+try {
+  hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+  storagesPerDatanode, capacity);
+  dfs = hdfsCluster.getFileSystem();
+  // 1. Write two replica on disk
+  DFSTestUtil.createFile(dfs, new Path(file), 

[32/50] [abbrv] hadoop git commit: HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy storage policy of all the files under the given dir. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy storage policy of all the files under the given dir. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a2808be4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a2808be4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a2808be4

Branch: refs/heads/HDFS-10285
Commit: a2808be44a190de548cac1fd340116fbde249e65
Parents: 44f0659
Author: Uma Maheswara Rao G 
Authored: Sat Sep 30 06:31:52 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:23:10 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  22 +-
 .../BlockStorageMovementAttemptedItems.java |   8 +-
 .../namenode/BlockStorageMovementNeeded.java| 277 +++--
 .../hdfs/server/namenode/FSTreeTraverser.java   | 313 ++
 .../server/namenode/ReencryptionHandler.java| 618 ---
 .../server/namenode/ReencryptionUpdater.java|   2 +-
 .../server/namenode/StoragePolicySatisfier.java |  43 +-
 .../src/main/resources/hdfs-default.xml |  23 +
 .../src/site/markdown/ArchivalStorage.md|   3 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../TestPersistentStoragePolicySatisfier.java   |   8 +-
 .../hdfs/server/namenode/TestReencryption.java  |   3 -
 .../namenode/TestReencryptionHandler.java   |  10 +-
 .../namenode/TestStoragePolicySatisfier.java| 377 ++-
 15 files changed, 1260 insertions(+), 457 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2808be4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 828c354..38244fd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -602,6 +602,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.storage.policy.satisfier.enabled";
   public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
   false;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY =
+  "dfs.storage.policy.satisfier.queue.limit";
+  public static final int  DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_DEFAULT =
+  1000;
+  public static final String DFS_SPS_WORK_MULTIPLIER_PER_ITERATION =
+  "dfs.storage.policy.satisfier.work.multiplier.per.iteration";
+  public static final int DFS_SPS_WORK_MULTIPLIER_PER_ITERATION_DEFAULT =
+  1;
   public static final String DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a2808be4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index 7465853..570b85d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1422,7 +1422,27 @@ public class DFSUtil {
 "It should be a positive, non-zero integer value.");
 return blocksReplWorkMultiplier;
   }
-  
+
+  /**
+   * Get DFS_SPS_WORK_MULTIPLIER_PER_ITERATION from
+   * configuration.
+   *
+   * @param conf Configuration
+   * @return Value of DFS_SPS_WORK_MULTIPLIER_PER_ITERATION
+   */
+  public static int getSPSWorkMultiplier(Configuration conf) {
+int spsWorkMultiplier = conf
+.getInt(
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION,
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
+Preconditions.checkArgument(
+(spsWorkMultiplier > 0),
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION +
+" = '" + spsWorkMultiplier + "' is invalid. " +
+"It should be a positive, non-zero integer value.");
+return spsWorkMultiplier;
+  }
+
   /**
* Get SPNEGO keytab Key from configuration
* 
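Illustrative usage of the new helper (a sketch, not from the patch): valid
values pass through, while zero or negative values fail fast through the
Preconditions check, which throws IllegalArgumentException.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

class SpsWorkMultiplierSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    conf.setInt(DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION, 4);
    System.out.println(DFSUtil.getSPSWorkMultiplier(conf)); // prints 4

    conf.setInt(DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION, 0);
    DFSUtil.getSPSWorkMultiplier(conf); // throws IllegalArgumentException
  }
}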


[28/50] [abbrv] hadoop git commit: HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to improve thread cleanup time. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to improve thread cleanup time. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/eacd760d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/eacd760d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/eacd760d

Branch: refs/heads/HDFS-10285
Commit: eacd760df7b70da3a6e07ede5f8300e8ff646c0e
Parents: 1c01ae2
Author: Uma Maheswara Rao G 
Authored: Wed Jul 19 00:55:26 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:56 2018 +0530

--
 .../datanode/BlockStorageMovementTracker.java   | 16 
 .../server/datanode/StoragePolicySatisfyWorker.java |  5 +++--
 2 files changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/eacd760d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index c7e952b..f3d2bb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -77,7 +77,8 @@ public class BlockStorageMovementTracker implements Runnable {
 moverTaskFutures.wait(2000);
   }
 } catch (InterruptedException ignore) {
-  // ignore
+  // Sets interrupt flag of this thread.
+  Thread.currentThread().interrupt();
 }
   }
   try {
@@ -102,12 +103,19 @@ public class BlockStorageMovementTracker implements Runnable {
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }
-// handle completed or inprogress blocks movements per trackId.
-blksMovementsStatusHandler.handle(resultPerTrackIdList);
+if (running) {
+  // handle completed or inprogress blocks movements per trackId.
+  blksMovementsStatusHandler.handle(resultPerTrackIdList);
+}
 movementResults.remove(trackId);
   }
 }
-  } catch (ExecutionException | InterruptedException e) {
+  } catch (InterruptedException e) {
+if (running) {
+  LOG.error("Exception while moving block replica to target storage"
+  + " type", e);
+}
+  } catch (ExecutionException e) {
 // TODO: Do we need failure retries and implement the same if required.
 LOG.error("Exception while moving block replica to target storage type",
 e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/eacd760d/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 196cd58..4e57805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -137,8 +137,8 @@ public class StoragePolicySatisfyWorker {
* thread.
*/
   void stop() {
-movementTrackerThread.interrupt();
 movementTracker.stopTracking();
+movementTrackerThread.interrupt();
   }
 
   /**
@@ -147,7 +147,8 @@ public class StoragePolicySatisfyWorker {
   void waitToFinishWorkerThread() {
 try {
   movementTrackerThread.join(3000);
-} catch (InterruptedException ie) {
+} catch (InterruptedException ignore) {
+  // ignore
 }
   }
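The reordering above clears the tracker's running flag before the interrupt is
delivered, so the tracker's catch blocks can tell a requested stop from an
unexpected interrupt. A generic sketch of the pattern (illustrative, not the
Hadoop classes):

class TrackerSketch implements Runnable {
  private volatile boolean running = true;

  void stopTracking() {
    running = false; // must happen before the interrupt is delivered
  }

  @Override
  public void run() {
    while (running) {
      try {
        Thread.sleep(1000); // stand-in for blocking tracker work
      } catch (InterruptedException e) {
        if (running) {
          System.err.println("unexpected interrupt while tracking");
        }
        Thread.currentThread().interrupt(); // preserve interrupt status
        return; // exit either way; the flag only decides whether to log
      }
    }
  }
}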
 




[01/50] [abbrv] hadoop git commit: HDFS-10885. [SPS]: Mover tool should not be allowed to run when Storage Policy Satisfier is on. Contributed by Wei Zhou [Forced Update!]

2018-01-23 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 0060d356e -> 55ddb0742 (forced update)


HDFS-10885. [SPS]: Mover tool should not be allowed to run when Storage Policy Satisfier is on. Contributed by Wei Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/81a34266
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/81a34266
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/81a34266

Branch: refs/heads/HDFS-10285
Commit: 81a34266df01350920c918cb5f2451d27062e456
Parents: daeb4b1
Author: Rakesh Radhakrishnan 
Authored: Tue Dec 6 17:56:08 2016 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:09:56 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  4 ++
 .../hadoop/hdfs/protocol/ClientProtocol.java|  8 +++
 .../ClientNamenodeProtocolTranslatorPB.java | 18 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  9 +++
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  5 ++
 ...tNamenodeProtocolServerSideTranslatorPB.java | 18 ++
 .../hadoop/hdfs/server/balancer/ExitStatus.java |  3 +-
 .../server/blockmanagement/BlockManager.java| 27 -
 .../hdfs/server/common/HdfsServerConstants.java |  3 +
 .../apache/hadoop/hdfs/server/mover/Mover.java  | 23 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |  6 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java | 11 
 .../server/namenode/StoragePolicySatisfier.java | 37 +++
 .../src/main/resources/hdfs-default.xml |  9 +++
 .../TestStoragePolicySatisfyWorker.java |  2 +
 .../hadoop/hdfs/server/mover/TestMover.java | 64 ++--
 .../hdfs/server/mover/TestStorageMover.java |  2 +
 .../namenode/TestStoragePolicySatisfier.java| 23 +++
 18 files changed, 258 insertions(+), 14 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/81a34266/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index f5847cb..0dd9e0c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3093,6 +3093,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 }
   }
 
+  public boolean isStoragePolicySatisfierRunning() throws IOException {
+return namenode.isStoragePolicySatisfierRunning();
+  }
+
   Tracer getTracer() {
 return tracer;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81a34266/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index e51b05d..b2f5df4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1745,4 +1745,12 @@ public interface ClientProtocol {
*/
   @Idempotent
   void satisfyStoragePolicy(String path) throws IOException;
+
+  /**
+   * Check if StoragePolicySatisfier is running.
+   * @return true if StoragePolicySatisfier is running
+   * @throws IOException
+   */
+  @Idempotent
+  boolean isStoragePolicySatisfierRunning() throws IOException;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/81a34266/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
index ee9f73f..01c9e69 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
@@ -147,6 +147,8 @@ import 

[23/50] [abbrv] hadoop git commit: HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks before removing the xattr. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks before removing the xattr. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/76b47b2b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/76b47b2b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/76b47b2b

Branch: refs/heads/HDFS-10285
Commit: 76b47b2b5915d5243fa3535cfefe926582d3bddb
Parents: 98ad30d
Author: Uma Maheswara Rao G 
Authored: Mon Jul 10 18:00:58 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:36 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  15 +++
 .../server/namenode/StoragePolicySatisfier.java |  20 +++-
 .../namenode/TestStoragePolicySatisfier.java| 102 ++-
 ...stStoragePolicySatisfierWithStripedFile.java |  90 
 4 files changed, 224 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/76b47b2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index dd491cd..6dd743a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4325,6 +4325,21 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
+   * Check whether the file has any low-redundancy blocks.
+   */
+  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
+boolean result = false;
+for (BlockInfo block : bc.getBlocks()) {
+  short expected = getExpectedRedundancyNum(block);
+  final NumberReplicas n = countNodes(block);
+  if (expected > n.liveReplicas()) {
+result = true;
+  }
+}
+return result;
+  }
+
+  /**
* Check sufficient redundancy of the blocks in the collection. If any block
* needs reconstruction, insert it into the reconstruction queue.
* Otherwise, if the block is more than the expected replication factor,
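Taken together, the BlockManager hunk above and the satisfier hunk below let
SPS defer cleanup instead of dropping the xattr while replicas are still
missing. A sketch of the decision (illustrative: the wrapper class, method and
queue names are assumptions; hasLowRedundancyBlocks() is from this patch):

import java.util.Queue;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;

class LowRedundancySketch {
  // "retryQueue" stands in for the satisfier's storageMovementNeeded queue.
  static void onMovementSuccess(BlockManager bm, BlockCollection bc,
      Queue<Long> retryQueue) {
    if (bm.hasLowRedundancyBlocks(bc)) {
      // Policy satisfied, but some replicas are still missing: retry later
      // (this is what the new FEW_LOW_REDUNDANCY_BLOCKS status signals).
      retryQueue.add(bc.getId());
    } else {
      // Safe to remove the SPS xattr (cleanup helper omitted in this sketch).
    }
  }
}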

http://git-wip-us.apache.org/repos/asf/hadoop/blob/76b47b2b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 1b2afa3..97cbf1b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -99,7 +99,10 @@ public class StoragePolicySatisfier implements Runnable {
 // Represents that, the analysis skipped due to some conditions.
 // Example conditions are if no blocks really exists in block collection or
 // if analysis is not required on ec files with unsuitable storage policies
-BLOCKS_TARGET_PAIRING_SKIPPED;
+BLOCKS_TARGET_PAIRING_SKIPPED,
+// Represents that, All the reported blocks are satisfied the policy but
+// some of the blocks are low redundant.
+FEW_LOW_REDUNDANCY_BLOCKS
   }
 
   public StoragePolicySatisfier(final Namesystem namesystem,
@@ -247,6 +250,14 @@ public class StoragePolicySatisfier implements Runnable {
   case FEW_BLOCKS_TARGETS_PAIRED:
 this.storageMovementsMonitor.add(blockCollectionID, false);
 break;
+  case FEW_LOW_REDUNDANCY_BLOCKS:
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Adding trackID " + blockCollectionID
+  + " back to retry queue as some of the blocks"
+  + " are low redundant.");
+}
+this.storageMovementNeeded.add(blockCollectionID);
+break;
   // Just clean Xattrs
   case BLOCKS_TARGET_PAIRING_SKIPPED:
   case BLOCKS_ALREADY_SATISFIED:
@@ -347,11 +358,16 @@ public class StoragePolicySatisfier implements Runnable {
 boolean computeStatus = computeBlockMovingInfos(blockMovingInfos,
 blockInfo, expectedStorageTypes, existing, storages);
 if (computeStatus
-  

[24/50] [abbrv] hadoop git commit: HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running together. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running together. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d770a2fc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d770a2fc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d770a2fc

Branch: refs/heads/HDFS-10285
Commit: d770a2fcdd90de5dd9d7b0fe8ea7f68d49c7fb6e
Parents: 76b47b2
Author: Uma Maheswara Rao G 
Authored: Wed Jul 12 17:56:56 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:41 2018 +0530

--
 .../server/namenode/StoragePolicySatisfier.java | 53 +++-
 .../namenode/TestStoragePolicySatisfier.java|  3 +-
 ...stStoragePolicySatisfierWithStripedFile.java |  5 +-
 3 files changed, 34 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d770a2fc/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 97cbf1b..00b4cd0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -128,6 +128,14 @@ public class StoragePolicySatisfier implements Runnable {
*/
   public synchronized void start(boolean reconfigStart) {
 isRunning = true;
+if (checkIfMoverRunning()) {
+  isRunning = false;
+  LOG.error(
+  "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
+  + HdfsServerConstants.MOVER_ID_PATH.toString()
+  + " been opened. Maybe a Mover instance is running!");
+  return;
+}
 if (reconfigStart) {
   LOG.info("Starting StoragePolicySatisfier, as admin requested to "
   + "activate it.");
@@ -211,20 +219,6 @@ public class StoragePolicySatisfier implements Runnable {
 
   @Override
   public void run() {
-boolean isMoverRunning = !checkIfMoverRunning();
-synchronized (this) {
-  isRunning = isMoverRunning;
-  if (!isRunning) {
-// Stopping monitor thread and clearing queues as well
-this.clearQueues();
-this.storageMovementsMonitor.stopGracefully();
-LOG.error(
-"Stopping StoragePolicySatisfier thread " + "as Mover ID file "
-+ HdfsServerConstants.MOVER_ID_PATH.toString()
-+ " been opened. Maybe a Mover instance is running!");
-return;
-  }
-}
 while (namesystem.isRunning() && isRunning) {
   try {
 if (!namesystem.isInSafeMode()) {
@@ -274,25 +268,34 @@ public class StoragePolicySatisfier implements Runnable {
 // we want to check block movements.
 Thread.sleep(3000);
   } catch (Throwable t) {
-synchronized (this) {
+handleException(t);
+  }
+}
+  }
+
+  private void handleException(Throwable t) {
+// Double-check to avoid entering the synchronized block unnecessarily.
+if (isRunning) {
+  synchronized (this) {
+if (isRunning) {
   isRunning = false;
   // Stopping monitor thread and clearing queues as well
   this.clearQueues();
   this.storageMovementsMonitor.stopGracefully();
-}
-if (!namesystem.isRunning()) {
-  LOG.info("Stopping StoragePolicySatisfier.");
-  if (!(t instanceof InterruptedException)) {
-LOG.info("StoragePolicySatisfier received an exception"
-+ " while shutting down.", t);
+  if (!namesystem.isRunning()) {
+LOG.info("Stopping StoragePolicySatisfier.");
+if (!(t instanceof InterruptedException)) {
+  LOG.info("StoragePolicySatisfier received an exception"
+  + " while shutting down.", t);
+}
+return;
   }
-  break;
 }
-LOG.error("StoragePolicySatisfier thread received runtime exception. "
-+ "Stopping Storage policy satisfier work", t);
-break;
   }
 }
+LOG.error("StoragePolicySatisfier thread received runtime exception. "
++ "Stopping Storage policy satisfier work", t);
+return;
   }
 
  private BlocksMovingAnalysisStatus analyseBlocksStorageMovementsAndAssignToDN(
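handleException() above is a double-checked guard: a lock-free read of the
volatile flag skips the monitor once the satisfier is already stopped, and the
state transition itself happens only under the lock. A generic sketch of the
pattern (illustrative, not the Hadoop class):

class ShutdownGuardSketch {
  private volatile boolean isRunning = true;

  void shutdownOnce(Runnable cleanup) {
    if (isRunning) {            // cheap, lock-free fast path
      synchronized (this) {
        if (isRunning) {        // re-check under the lock
          isRunning = false;    // later callers take the fast path
          cleanup.run();        // clear queues, stop monitors, etc.
        }
      }
    }
  }
}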


[44/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/343d9cbd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
new file mode 100644
index 000..5635621
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -0,0 +1,572 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import static org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.ItemInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser.TraverseInfo;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * A class to track the block collection IDs (inode IDs) for which physical
+ * storage movement is needed, as per the namespace and storage reports from
+ * the DNs. It scans the pending directories for which storage movement is
+ * required and schedules the block collection IDs for movement. It tracks
+ * the info of scheduled items and removes the SPS xAttr from the
+ * file/directory once the movement succeeds.
+ */
+@InterfaceAudience.Private
+public class BlockStorageMovementNeeded {
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(BlockStorageMovementNeeded.class);
+
+  private final Queue storageMovementNeeded =
+  new LinkedList();
+
+  /**
+   * Map of startId to the number of children. The number of children
+   * indicates the number of files pending to satisfy the policy.
+   */
+  private final Map pendingWorkForDirectory =
+  new HashMap();
+
+  private final Map<Long, StoragePolicySatisfyPathStatusInfo> spsStatus =
+      new ConcurrentHashMap<>();
+
+  private final Namesystem namesystem;
+
+  // List of pending dir to satisfy the policy
+  private final Queue<Long> spsDirsToBeTraveresed = new LinkedList<Long>();
+
+  private final StoragePolicySatisfier sps;
+
+  private Daemon inodeIdCollector;
+
+  private final int maxQueuedItem;
+
+  // Amount of time to cache the SUCCESS status of a path before turning it
+  // to NOT_AVAILABLE.
+  private static long statusClearanceElapsedTimeMs = 30;
+
+  public BlockStorageMovementNeeded(Namesystem namesystem,
+  StoragePolicySatisfier sps, int queueLimit) {
+this.namesystem = namesystem;
+this.sps = sps;
+this.maxQueuedItem = queueLimit;
+  }
+
+  /**
+   * Add the candidate to the tracking list for which storage movement
+   * is expected, if necessary.
+   *
+   * @param trackInfo
+   *  - track info for satisfying the policy
+   */
+  public synchronized void add(ItemInfo trackInfo) {
+spsStatus.put(trackInfo.getStartId(),
+new StoragePolicySatisfyPathStatusInfo(
+StoragePolicySatisfyPathStatus.IN_PROGRESS));
+storageMovementNeeded.add(trackInfo);
+  }
+
+  /**
+   * Add the itemInfo to tracking list for which 
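To make the tracking flow concrete, here is a minimal sketch of how NameNode-side code might feed this class. Only the constructor and add(ItemInfo) appear in the diff above; the ItemInfo(startId, trackId) constructor and the surrounding variables are assumptions, not the committed wiring.

    // Hedged sketch: enqueue one file for SPS tracking (assumed wiring).
    // startId = inode ID of the directory carrying the SPS xAttr (assumed),
    // trackId = inode ID of the file whose blocks need to move (assumed).
    BlockStorageMovementNeeded movementNeeded =
        new BlockStorageMovementNeeded(namesystem, sps, 1000 /* queue limit */);
    movementNeeded.add(new ItemInfo(dirInode.getId(), fileInode.getId()));
    // add() also marks the startId as IN_PROGRESS in the spsStatus map above.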

[33/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fb4a3d8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 57e9f94..70219f6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -203,11 +203,11 @@ public class TestStoragePolicySatisfier {
   }
 
   /**
-   * Tests to verify that the block storage movement results will be propagated
+   * Tests to verify that the block storage movement report will be propagated
* to Namenode via datanode heartbeat.
*/
   @Test(timeout = 30)
-  public void testPerTrackIdBlocksStorageMovementResults() throws Exception {
+  public void testBlksStorageMovementAttemptFinishedReport() throws Exception {
 try {
   createCluster();
   // Change policy to ONE_SSD
@@ -229,7 +229,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 2, 3, dfs);
 
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
 } finally {
   shutdownCluster();
 }
@@ -276,7 +276,7 @@ public class TestStoragePolicySatisfier {
 fileName, StorageType.DISK, 2, 3, dfs);
   }
 
-  waitForBlocksMovementResult(files.size(), 3);
+  waitForBlocksMovementAttemptReport(files.size(), 3);
 } finally {
   shutdownCluster();
 }
@@ -457,7 +457,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 2, 3, dfs);
 
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
 } finally {
   shutdownCluster();
 }
@@ -630,7 +630,7 @@ public class TestStoragePolicySatisfier {
   // No block movement will be scheduled as there is no target node
   // available with the required storage type.
   waitForAttemptedItems(1, 3);
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
   DFSTestUtil.waitExpectedStorageType(
   file1, StorageType.ARCHIVE, 1, 3, dfs);
   DFSTestUtil.waitExpectedStorageType(
@@ -691,7 +691,7 @@ public class TestStoragePolicySatisfier {
   DFSTestUtil.waitExpectedStorageType(
   file, StorageType.DISK, 3, 3, dfs);
 
-  waitForBlocksMovementResult(1, 3);
+  waitForBlocksMovementAttemptReport(1, 3);
 } finally {
   shutdownCluster();
 }
@@ -871,7 +871,7 @@ public class TestStoragePolicySatisfier {
   Set dns = hdfsCluster.getNamesystem()
   .getBlockManager().getDatanodeManager().getDatanodes();
   for (DatanodeDescriptor dd : dns) {
-assertNull(dd.getBlocksToMoveStorages());
+assertNull(dd.getBlocksToMoveStorages(1));
   }
 
   // Enable heart beats now
@@ -1224,7 +1224,7 @@ public class TestStoragePolicySatisfier {
   /**
* Test SPS for batch processing.
*/
-  @Test(timeout = 30)
+  @Test(timeout = 300)
   public void testBatchProcessingForSPSDirectory() throws Exception {
 try {
   StorageType[][] diskTypes = new StorageType[][] {
@@ -1252,7 +1252,7 @@ public class TestStoragePolicySatisfier {
 DFSTestUtil.waitExpectedStorageType(fileName, StorageType.ARCHIVE, 2,
 3, dfs);
   }
-  waitForBlocksMovementResult(files.size(), 3);
+  waitForBlocksMovementAttemptReport(files.size(), 3);
   String expectedLogMessage = "StorageMovementNeeded queue remaining"
   + " capacity is zero";
   assertTrue("Log output does not contain expected log message: "
@@ -1268,7 +1268,7 @@ public class TestStoragePolicySatisfier {
*  1. Delete /root when traversing Q
*  2. U, R, S should not be in queued.
*/
-  @Test
+  @Test(timeout = 30)
   public void testTraverseWhenParentDeleted() throws Exception {
 StorageType[][] diskTypes = new StorageType[][] {
 {StorageType.DISK, StorageType.ARCHIVE},
@@ -1330,7 +1330,7 @@ public class TestStoragePolicySatisfier {
*  1. Delete L when traversing Q
*  2. E, M, U, R, S should not be in queued.
*/
-  @Test
+  @Test(timeout = 30)
   public void testTraverseWhenRootParentDeleted() throws Exception {
 StorageType[][] diskTypes = new StorageType[][] {
 {StorageType.DISK, StorageType.ARCHIVE},
@@ -1387,6 +1387,82 @@ public class TestStoragePolicySatisfier {
 dfs.delete(new Path("/root"), 

[27/50] [abbrv] hadoop git commit: HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1c01ae23
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1c01ae23
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1c01ae23

Branch: refs/heads/HDFS-10285
Commit: 1c01ae23f07c6cde7df75d5269947201017445bb
Parents: 6402d5d
Author: Uma Maheswara Rao G 
Authored: Mon Jul 17 10:24:06 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:53 2018 +0530

--
 .../hdfs/server/blockmanagement/BlockManager.java   |  2 +-
 .../server/datanode/StoragePolicySatisfyWorker.java |  6 +++---
 .../hdfs/server/namenode/StoragePolicySatisfier.java|  6 +++---
 .../hadoop/hdfs/server/protocol/DatanodeProtocol.java   |  5 ++---
 .../org/apache/hadoop/hdfs/server/mover/TestMover.java  |  7 ---
 .../server/namenode/TestStoragePolicySatisfier.java | 12 ++--
 6 files changed, 19 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c01ae23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 6dd743a..480e4a4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -434,7 +434,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   private final BlockIdManager blockIdManager;
 
-  /** For satisfying block storage policies */
+  /** For satisfying block storage policies. */
   private final StoragePolicySatisfier sps;
   private final BlockStorageMovementNeeded storageMovementNeeded =
   new BlockStorageMovementNeeded();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c01ae23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index f4f97dd..196cd58 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -329,7 +329,7 @@ public class StoragePolicySatisfyWorker {
   /**
* Block movement status code.
*/
-  public static enum BlockMovementStatus {
+  public enum BlockMovementStatus {
 /** Success. */
 DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
 /**
@@ -343,7 +343,7 @@ public class StoragePolicySatisfyWorker {
 
 private final int code;
 
-private BlockMovementStatus(int code) {
+BlockMovementStatus(int code) {
   this.code = code;
 }
 
@@ -365,7 +365,7 @@ public class StoragePolicySatisfyWorker {
 private final DatanodeInfo target;
 private final BlockMovementStatus status;
 
-public BlockMovementResult(long trackId, long blockId,
+BlockMovementResult(long trackId, long blockId,
 DatanodeInfo target, BlockMovementStatus status) {
   this.trackId = trackId;
   this.blockId = blockId;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1c01ae23/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 00b4cd0..af3b7f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -714,10 +714,10 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   private static class StorageTypeNodePair {
-public StorageType storageType = null;
-public DatanodeDescriptor dn = null;
+private StorageType 

[18/50] [abbrv] hadoop git commit: HDFS-11883: [SPS] : Handle NPE in BlockStorageMovementTracker when dropSPSWork() called. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-11883: [SPS] : Handle NPE in BlockStorageMovementTracker when 
dropSPSWork() called. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/445799b5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/445799b5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/445799b5

Branch: refs/heads/HDFS-10285
Commit: 445799b5619301e62a8e5ba5248d3e3f17b09500
Parents: 79dc141
Author: Uma Maheswara Rao G 
Authored: Tue May 30 18:12:17 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:18 2018 +0530

--
 .../hdfs/server/datanode/BlockStorageMovementTracker.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/445799b5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index 99858bc..c7e952b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -88,13 +88,17 @@ public class BlockStorageMovementTracker implements 
Runnable {
   long trackId = result.getTrackId();
   List<Future<BlockMovementResult>> blocksMoving = moverTaskFutures
   .get(trackId);
+  if (blocksMoving == null) {
+LOG.warn("Future task doesn't exist for trackId " + trackId);
+continue;
+  }
   blocksMoving.remove(future);
 
   List<BlockMovementResult> resultPerTrackIdList =
   addMovementResultToTrackIdList(result);
 
   // Completed all the scheduled blocks movement under this 'trackId'.
-  if (blocksMoving.isEmpty()) {
+  if (blocksMoving.isEmpty() || moverTaskFutures.get(trackId) == null) {
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }
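The null guards above close a race: dropSPSWork() can clear moverTaskFutures while the tracker thread is still draining completed futures. A self-contained sketch of the same guard pattern in plain Java (generic names, not the Hadoop classes):

    import java.util.List;
    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    class TrackerSketch {
      // trackId -> pending items; another thread may remove entries anytime.
      private final Map<Long, List<String>> tasks = new ConcurrentHashMap<>();

      void onResult(long trackId, String finished) {
        List<String> pending = tasks.get(trackId);
        if (pending == null) {
          return; // work was dropped concurrently; skip instead of an NPE
        }
        pending.remove(finished);
        if (pending.isEmpty() || tasks.get(trackId) == null) {
          tasks.remove(trackId); // finished, or dropped mid-flight
        }
      }
    }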





[17/50] [abbrv] hadoop git commit: HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the edits log. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the 
edits log. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/79dc141b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/79dc141b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/79dc141b

Branch: refs/heads/HDFS-10285
Commit: 79dc141bb8e1e93a7266aefe02f29cf2c34bf2f1
Parents: 84db58f
Author: Uma Maheswara Rao G 
Authored: Mon May 22 21:39:43 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:15 2018 +0530

--
 .../hdfs/server/namenode/FSDirAttrOp.java   |  91 
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 145 +++
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirectory.java   |  16 --
 .../hdfs/server/namenode/FSNamesystem.java  |  24 ++-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  10 ++
 .../server/namenode/StoragePolicySatisfier.java |   4 +-
 .../TestPersistentStoragePolicySatisfier.java   |  90 +++-
 .../namenode/TestStoragePolicySatisfier.java|   5 +-
 9 files changed, 268 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/79dc141b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 66d5f3d..201605f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -43,14 +42,12 @@ import com.google.common.collect.Lists;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 
 public class FSDirAttrOp {
   static FileStatus setPermission(
@@ -200,29 +197,6 @@ public class FSDirAttrOp {
 return fsd.getAuditFileInfo(iip);
   }
 
-  static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
-  String src, boolean logRetryCache) throws IOException {
-
-FSPermissionChecker pc = fsd.getPermissionChecker();
-List xAttrs = Lists.newArrayListWithCapacity(1);
-INodesInPath iip;
-fsd.writeLock();
-try {
-
-  // check operation permission.
-  iip = fsd.resolvePath(pc, src, DirOp.WRITE);
-  if (fsd.isPermissionEnabled()) {
-fsd.checkPathAccess(pc, iip, FsAction.WRITE);
-  }
-  XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-} finally {
-  fsd.writeUnlock();
-}
-fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-return fsd.getAuditFileInfo(iip);
-  }
-
   static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
   throws IOException {
 return bm.getStoragePolicies();
@@ -487,71 +461,6 @@ public class FSDirAttrOp {
 }
   }
 
-  static XAttr unprotectedSatisfyStoragePolicy(INodesInPath iip,
-  BlockManager bm, FSDirectory fsd) throws IOException {
-
-final INode inode = FSDirectory.resolveLastINode(iip);
-final int snapshotId = iip.getLatestSnapshotId();
-final List candidateNodes = new ArrayList<>();
-
-// TODO: think about optimization here, label the dir instead
-// of the sub-files of the dir.
-if (inode.isFile()) {
-  candidateNodes.add(inode);
-} else if (inode.isDirectory()) {
-  for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
-  candidateNodes.add(node);
-}
-  }
-}
-
-// If node has satisfy xattr, then stop adding it
-// to satisfy movement queue.
-if 

[50/50] [abbrv] hadoop git commit: HDFS-13033: [SPS]: Implement a mechanism to do file block movements for external SPS. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
HDFS-13033: [SPS]: Implement a mechanism to do file block movements for 
external SPS. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55ddb074
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55ddb074
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55ddb074

Branch: refs/heads/HDFS-10285
Commit: 55ddb0742654604a849e77c96ab65659f962f637
Parents: 6980058
Author: Uma Maheswara Rao G 
Authored: Tue Jan 23 16:19:46 2018 -0800
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 12:27:46 2018 +0530

--
 .../hdfs/server/balancer/NameNodeConnector.java |   8 +
 .../hdfs/server/common/sps/BlockDispatcher.java | 186 +
 .../sps/BlockMovementAttemptFinished.java   |  80 ++
 .../server/common/sps/BlockMovementStatus.java  |  53 
 .../common/sps/BlockStorageMovementTracker.java | 184 +
 .../sps/BlocksMovementsStatusHandler.java   |  95 +++
 .../hdfs/server/common/sps/package-info.java|  27 ++
 .../datanode/BlockStorageMovementTracker.java   | 186 -
 .../datanode/StoragePolicySatisfyWorker.java| 271 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |   4 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |   3 +-
 .../sps/BlockStorageMovementAttemptedItems.java |  12 +-
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   3 +-
 .../hdfs/server/namenode/sps/SPSService.java|  14 +-
 .../namenode/sps/StoragePolicySatisfier.java|  30 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java| 233 
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../sps/TestStoragePolicySatisfier.java |   6 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  69 -
 19 files changed, 997 insertions(+), 469 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55ddb074/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index be59cce..ccb414a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -268,6 +268,14 @@ public class NameNodeConnector implements Closeable {
 }
   }
 
+  /**
+   * Returns fallbackToSimpleAuth, which indicates whether a secure client
+   * has fallen back to simple auth during calls.
+   */
+  public AtomicBoolean getFallbackToSimpleAuth() {
+return fallbackToSimpleAuth;
+  }
+
   @Override
   public void close() {
 keyManager.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55ddb074/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
new file mode 100644
index 000..f87fcae
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common.sps;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;

[15/50] [abbrv] hadoop git commit: HDFS-11334: [SPS]: NN switch and rescheduling movements can lead to have more than one coordinator for same file blocks. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
HDFS-11334: [SPS]: NN switch and rescheduling movements can lead to have more 
than one coordinator for same file blocks. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc2bb362
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc2bb362
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc2bb362

Branch: refs/heads/HDFS-10285
Commit: dc2bb3626b292af10fccb3575599324daa8e0e43
Parents: db3cc98
Author: Uma Maheswara Rao G 
Authored: Tue Apr 18 15:23:58 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:08 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +
 .../server/blockmanagement/DatanodeManager.java |  12 ++
 .../hdfs/server/datanode/BPServiceActor.java|   4 +-
 .../datanode/BlockStorageMovementTracker.java   |  37 +++-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  12 +-
 .../datanode/StoragePolicySatisfyWorker.java|  95 +--
 .../BlockStorageMovementAttemptedItems.java |  80 ++---
 .../server/namenode/StoragePolicySatisfier.java |  15 +-
 .../protocol/BlocksStorageMovementResult.java   |   6 +-
 .../src/main/proto/DatanodeProtocol.proto   |   1 +
 .../TestStoragePolicySatisfyWorker.java |  68 
 .../TestStoragePolicySatisfierWithHA.java   | 170 +--
 13 files changed, 413 insertions(+), 95 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc2bb362/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 3d54706..9551baf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -609,7 +609,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.self.retry.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT =
-  30 * 60 * 1000;
+  20 * 60 * 1000;
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = 
"dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc2bb362/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 0c03608..996b986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -985,6 +985,9 @@ public class PBHelper {
   case FAILURE:
 status = Status.FAILURE;
 break;
+  case IN_PROGRESS:
+status = Status.IN_PROGRESS;
+break;
   default:
 throw new AssertionError("Unknown status: " + resultProto.getStatus());
   }
@@ -1011,6 +1014,9 @@ public class PBHelper {
   case FAILURE:
 status = BlocksStorageMovementResultProto.Status.FAILURE;
 break;
+  case IN_PROGRESS:
+status = BlocksStorageMovementResultProto.Status.IN_PROGRESS;
+break;
   default:
 throw new AssertionError("Unknown status: " + report.getStatus());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc2bb362/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index ed735a5..3c2cde1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ -1091,6 +1091,18 @@ public class DatanodeManager {

[37/50] [abbrv] hadoop git commit: HDFS-12310: [SPS]: Provide an option to track the status of in progress requests. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-12310: [SPS]: Provide an option to track the status of in progress 
requests. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3162832a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3162832a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3162832a

Branch: refs/heads/HDFS-10285
Commit: 3162832ad869dddee02f17891b731d1afd71e512
Parents: 9318ad9
Author: Rakesh Radhakrishnan 
Authored: Fri Nov 3 08:18:14 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:30:11 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |  22 
 .../hadoop/hdfs/protocol/ClientProtocol.java|  21 
 .../hadoop/hdfs/protocol/HdfsConstants.java |  27 +
 .../ClientNamenodeProtocolTranslatorPB.java |  20 
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  33 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  17 ++-
 ...tNamenodeProtocolServerSideTranslatorPB.java |  23 +++-
 .../server/blockmanagement/BlockManager.java|  12 ++
 .../namenode/BlockStorageMovementNeeded.java| 109 +++
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 ++-
 .../server/namenode/StoragePolicySatisfier.java |   8 ++
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  35 +-
 .../src/site/markdown/ArchivalStorage.md|   3 +-
 .../TestPersistentStoragePolicySatisfier.java   |   2 +-
 .../namenode/TestStoragePolicySatisfier.java|  67 
 .../hdfs/tools/TestStoragePolicyCommands.java   |  18 +++
 16 files changed, 424 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3162832a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 0dd9e0c..bd8b253 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -123,6 +123,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
@@ -3153,4 +3154,25 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 checkOpen();
 return new OpenFilesIterator(namenode, tracer, openFilesTypes, path);
   }
+
+  /**
+   * Check the storage policy satisfy status of the path for which
+   * {@link DFSClient#satisfyStoragePolicy(String)} is called.
+   *
+   * @return Storage policy satisfy status.
+   * <ul>
+   * <li>PENDING if path is in queue and not processed for satisfying
+   * the policy.</li>
+   * <li>IN_PROGRESS if satisfying the storage policy for path.</li>
+   * <li>SUCCESS if storage policy satisfied for the path.</li>
+   * <li>NOT_AVAILABLE if
+   * {@link DFSClient#satisfyStoragePolicy(String)} not called for
+   * path or SPS work is already finished.</li>
+   * </ul>
+   * @throws IOException
+   */
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+  String path) throws IOException {
+return namenode.checkStoragePolicySatisfyPathStatus(path);
+  }
 }
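With this API in place, a caller can poll until the request settles. A minimal sketch; only checkStoragePolicySatisfyPathStatus(...) comes from this patch, while the path, loop, and one-second back-off are illustrative assumptions:

    // Hedged sketch: poll SPS status after dfs.satisfyStoragePolicy(path).
    StoragePolicySatisfyPathStatus status;
    do {
      Thread.sleep(1000); // illustrative back-off, not part of the API
      status = dfsClient.checkStoragePolicySatisfyPathStatus("/data/file1");
    } while (status == StoragePolicySatisfyPathStatus.PENDING
        || status == StoragePolicySatisfyPathStatus.IN_PROGRESS);
    // SUCCESS: policy satisfied; NOT_AVAILABLE: never requested or aged out.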

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3162832a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 0a2404c..8eb0335 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.hdfs.inotify.EventBatchList;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import 

[22/50] [abbrv] hadoop git commit: HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/98ad30d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/98ad30d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/98ad30d7

Branch: refs/heads/HDFS-10285
Commit: 98ad30d750a7424da6703eab25d4c4f1ef2d7b1c
Parents: 488da8d
Author: Uma Maheswara Rao G 
Authored: Mon Jun 19 17:16:49 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:33 2018 +0530

--
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   | 93 +++-
 .../src/site/markdown/ArchivalStorage.md| 21 +
 .../src/site/markdown/HDFSCommands.md   |  2 +
 .../hdfs/tools/TestStoragePolicyCommands.java   | 43 -
 4 files changed, 157 insertions(+), 2 deletions(-)
--
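Per the usage strings added below, the two new subcommands are invoked through the storage policy admin tool, with <path> as a placeholder:

    hdfs storagepolicies -satisfyStoragePolicy -path <path>
    hdfs storagepolicies -isSPSRunning

Programmatically, the first command amounts to calling dfs.satisfyStoragePolicy(new Path(path)) on a DistributedFileSystem, as the command's run() method below shows.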


http://git-wip-us.apache.org/repos/asf/hadoop/blob/98ad30d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index aeb10d9..662957c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -32,6 +33,8 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
 import java.io.FileNotFoundException;
+import com.google.common.base.Joiner;
+
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -245,6 +248,92 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
 }
   }
 
+  /** Command to schedule blocks to move based on specified policy. */
+  private static class SatisfyStoragePolicyCommand implements
+  AdminHelper.Command {
+@Override
+public String getName() {
+  return "-satisfyStoragePolicy";
+}
+
+@Override
+public String getShortUsage() {
+  return "[" + getName() + " -path ]\n";
+}
+
+@Override
+public String getLongUsage() {
+  TableListing listing = AdminHelper.getOptionDescriptionListing();
+  listing.addRow("", "The path of the file/directory to satisfy"
+  + " storage policy");
+  return getShortUsage() + "\n" +
+  "Schedule blocks to move based on file/directory policy.\n\n" +
+  listing.toString();
+}
+
+@Override
+public int run(Configuration conf, List<String> args) throws IOException {
+  final String path = StringUtils.popOptionWithArgument("-path", args);
+  if (path == null) {
+System.err.println("Please specify the path for setting the storage " +
+"policy.\nUsage: " + getLongUsage());
+return 1;
+  }
+
+  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+  try {
+dfs.satisfyStoragePolicy(new Path(path));
+System.out.println("Scheduled blocks to move based on the current"
++ " storage policy on " + path);
+  } catch (Exception e) {
+System.err.println(AdminHelper.prettifyException(e));
+return 2;
+  }
+  return 0;
+}
+  }
+
+  /** Command to check storage policy satisfier status. */
+  private static class IsSPSRunningCommand implements AdminHelper.Command {
+@Override
+public String getName() {
+  return "-isSPSRunning";
+}
+
+@Override
+public String getShortUsage() {
+  return "[" + getName() + "]\n";
+}
+
+@Override
+public String getLongUsage() {
+  return getShortUsage() + "\n" +
+  "Check the status of Storage Policy Statisfier.\n\n";
+}
+
+@Override
+public int run(Configuration conf, List<String> args) throws IOException {
+  if (!args.isEmpty()) {
+System.err.print("Can't understand arguments: "
++ Joiner.on(" ").join(args) + "\n");
+System.err.println("Usage is " + getLongUsage());
+return 1;
+  }
+  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+  try {
+

[34/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fb4a3d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 9792f48..db4b395 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -155,7 +155,7 @@ import 
org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -1504,14 +1504,15 @@ public class NameNodeRpcServer implements 
NamenodeProtocols {
   boolean requestFullBlockReportLease,
   @Nonnull SlowPeerReports slowPeers,
   @Nonnull SlowDiskReports slowDisks,
-  BlocksStorageMovementResult[] blkMovementStatus) throws IOException {
+  BlocksStorageMoveAttemptFinished storageMovementFinishedBlks)
+  throws IOException {
 checkNNStartup();
 verifyRequest(nodeReg);
 return namesystem.handleHeartbeat(nodeReg, report,
 dnCacheCapacity, dnCacheUsed, xceiverCount, xmitsInProgress,
 failedVolumes, volumeFailureSummary, requestFullBlockReportLease,
 slowPeers, slowDisks,
-blkMovementStatus);
+storageMovementFinishedBlks);
   }
 
   @Override // DatanodeProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fb4a3d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a4372d5..a28a806 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.util.Time.monotonicNow;
+
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -44,7 +46,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import 
org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.util.Daemon;
@@ -82,25 +84,38 @@ public class StoragePolicySatisfier implements Runnable {
   /**
* Represents the collective analysis status for all blocks.
*/
-  private enum BlocksMovingAnalysisStatus {
-// Represents that, the analysis skipped due to some conditions. A such
-// condition is if block collection is in incomplete state.
-ANALYSIS_SKIPPED_FOR_RETRY,
-// Represents that, all block storage movement needed blocks found its
-// targets.
-ALL_BLOCKS_TARGETS_PAIRED,
-// Represents that, only fewer or none of the block storage movement needed
-// block found its eligible targets.
-FEW_BLOCKS_TARGETS_PAIRED,
-// Represents that, none of the blocks found for block storage movements.
-BLOCKS_ALREADY_SATISFIED,
-// Represents that, the analysis skipped due to some conditions.
-// Example conditions are if no blocks really exists in block collection or
-// if analysis is not required on ec files with unsuitable storage policies
-BLOCKS_TARGET_PAIRING_SKIPPED,
-// Represents that, All the reported blocks are satisfied the policy but
-// some of the blocks are low redundant.
-FEW_LOW_REDUNDANCY_BLOCKS
+  private 

[39/50] [abbrv] hadoop git commit: HDFS-12106: [SPS]: Improve storage policy satisfier configurations. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-12106: [SPS]: Improve storage policy satisfier configurations. Contributed 
by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fd6f965
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fd6f965
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fd6f965

Branch: refs/heads/HDFS-10285
Commit: 1fd6f9653e90d457d1fb7240ee24c9cf1fcb1639
Parents: 8d0da51
Author: Surendra Singh Lilhore 
Authored: Wed Nov 15 20:22:27 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:32:22 2018 +0530

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  6 +++
 .../hadoop/hdfs/protocolPB/PBHelperClient.java  |  4 ++
 .../src/main/proto/ClientNamenodeProtocol.proto |  3 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 10 ++--
 .../server/blockmanagement/DatanodeManager.java | 12 ++---
 .../datanode/StoragePolicySatisfyWorker.java|  3 +-
 .../BlockStorageMovementAttemptedItems.java |  8 +--
 .../namenode/BlockStorageMovementNeeded.java| 46 
 .../hdfs/server/namenode/FSNamesystem.java  |  3 ++
 .../server/namenode/StoragePolicySatisfier.java | 42 ---
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   | 27 +++---
 .../src/main/resources/hdfs-default.xml | 17 --
 .../src/site/markdown/ArchivalStorage.md|  2 +-
 .../TestBlockStorageMovementAttemptedItems.java | 10 ++--
 .../namenode/TestStoragePolicySatisfier.java| 57 ++--
 15 files changed, 199 insertions(+), 51 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fd6f965/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index ff52ef1..d9b5b9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -150,6 +150,12 @@ public final class HdfsConstants {
 SUCCESS,
 
 /**
+ * A few blocks failed to move, so the path still does not
+ * fully satisfy the storage policy.
+ */
+FAILURE,
+
+/**
  * Status not available.
  */
 NOT_AVAILABLE

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fd6f965/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
index 80550cb..d44fdbb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
@@ -3294,6 +3294,8 @@ public class PBHelperClient {
   return StoragePolicySatisfyPathStatus.IN_PROGRESS;
 case SUCCESS:
   return StoragePolicySatisfyPathStatus.SUCCESS;
+case FAILURE:
+  return StoragePolicySatisfyPathStatus.FAILURE;
 case NOT_AVAILABLE:
   return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
 default:
@@ -3310,6 +3312,8 @@ public class PBHelperClient {
   return HdfsConstants.StoragePolicySatisfyPathStatus.IN_PROGRESS;
 case SUCCESS:
   return HdfsConstants.StoragePolicySatisfyPathStatus.SUCCESS;
+case FAILURE:
+  return HdfsConstants.StoragePolicySatisfyPathStatus.FAILURE;
 case NOT_AVAILABLE:
   return HdfsConstants.StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
 default:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fd6f965/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
index 2e18bef..ee03c0d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
@@ -843,7 +843,8 @@ message CheckStoragePolicySatisfyPathStatusResponseProto {
 PENDING = 0;
 IN_PROGRESS = 1;
 

[12/50] [abbrv] hadoop git commit: HDFS-11239: [SPS]: Check Mover file ID lease also to determine whether Mover is running. Contributed by Wei Zhou

2018-01-23 Thread rakeshr
HDFS-11239: [SPS]: Check Mover file ID lease also to determine whether Mover is 
running. Contributed by Wei Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1fb36227
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1fb36227
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1fb36227

Branch: refs/heads/HDFS-10285
Commit: 1fb36227153d06868b8384b43b436f60b4e8bcd5
Parents: 2c31170
Author: Rakesh Radhakrishnan 
Authored: Fri Feb 17 20:49:38 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:12:57 2018 +0530

--
 .../hdfs/server/namenode/FSNamesystem.java  |  17 ++-
 .../hadoop/hdfs/server/namenode/Namesystem.java |   7 ++
 .../server/namenode/StoragePolicySatisfier.java |  19 +---
 .../namenode/TestStoragePolicySatisfier.java| 108 +++
 4 files changed, 113 insertions(+), 38 deletions(-)
--
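The net effect of this change: instead of only checking that the Mover ID file exists (which a crashed Mover can leave behind), SPS asks the namesystem whether that file still holds a live write lease. A condensed sketch of the resulting check, paraphrasing the patch rather than quoting it verbatim:

    // Hedged sketch: SPS treats Mover as running only while the Mover ID
    // file is held open for write (i.e., a valid lease exists).
    private boolean checkIfMoverRunning() {
      String moverId = HdfsServerConstants.MOVER_ID_PATH.toString();
      return namesystem.isFileOpenedForWrite(moverId);
    }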


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fb36227/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index bfd0ec2..53aefdb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -3518,7 +3518,22 @@ public class FSNamesystem implements Namesystem, 
FSNamesystemMBean,
   BlockInfo getStoredBlock(Block block) {
 return blockManager.getStoredBlock(block);
   }
-  
+
+  @Override
+  public boolean isFileOpenedForWrite(String path) {
+readLock();
+try {
+  INode inode = dir.getINode(path, FSDirectory.DirOp.READ);
+  INodeFile iNodeFile = INodeFile.valueOf(inode, path);
+  LeaseManager.Lease lease = leaseManager.getLease(iNodeFile);
+  return lease != null;
+} catch (IOException e) {
+  return false;
+} finally {
+  readUnlock();
+}
+  }
+
   @Override
   public boolean isInSnapshot(long blockCollectionID) {
 assert hasReadLock();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fb36227/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
index e07376b..a2b07ca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Namesystem.java
@@ -45,4 +45,11 @@ public interface Namesystem extends RwLock, SafeMode {
* middle of the starting active services.
*/
   boolean inTransitionToActive();
+
+  /**
+   * Check whether the file has been opened for write.
+   * @param filePath the path of the file to check
+   * @return true if a valid write lease exists, otherwise false.
+   */
+  boolean isFileOpenedForWrite(String filePath);
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1fb36227/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index dc58294..29c8a5d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -151,19 +150,8 @@ public class StoragePolicySatisfier implements Runnable {
 
   // Return true if a Mover instance is running
   private boolean checkIfMoverRunning() {
-boolean ret = false;
-try {
-  String moverId = HdfsServerConstants.MOVER_ID_PATH.toString();
-  INode inode = namesystem.getFSDirectory().getINode(
-  moverId, FSDirectory.DirOp.READ);
-  if (inode != null) {
-ret = true;

[19/50] [abbrv] hadoop git commit: HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/94c1f0f4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/94c1f0f4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/94c1f0f4

Branch: refs/heads/HDFS-10285
Commit: 94c1f0f42356d2e7bfc43211f99e70c6061fd86f
Parents: 445799b
Author: Rakesh Radhakrishnan 
Authored: Mon Jun 5 12:32:41 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:22 2018 +0530

--
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 15 ++---
 .../namenode/TestStoragePolicySatisfier.java| 32 
 2 files changed, 42 insertions(+), 5 deletions(-)
--
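In short: a zero-length file has no blocks to move, so SPS now skips it and never records the SPS xAttr. A minimal client-side sketch of the behavior the test below verifies (cluster setup elided):

    // Hedged sketch: satisfyStoragePolicy on an empty file becomes a no-op.
    DistributedFileSystem fs = cluster.getFileSystem();
    Path empty = new Path("/zeroSizeFile");
    DFSTestUtil.createFile(fs, empty, 0L, (short) 1, 0L);
    fs.satisfyStoragePolicy(empty); // skipped: the file has no blocks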


http://git-wip-us.apache.org/repos/asf/hadoop/blob/94c1f0f4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
index 81d337f..bd4e5ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
@@ -51,7 +51,6 @@ final class FSDirSatisfyStoragePolicyOp {
 
 assert fsd.getFSNamesystem().hasWriteLock();
 FSPermissionChecker pc = fsd.getPermissionChecker();
-List xAttrs = Lists.newArrayListWithCapacity(1);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -62,8 +61,11 @@ final class FSDirSatisfyStoragePolicyOp {
 fsd.checkPathAccess(pc, iip, FsAction.WRITE);
   }
   XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-  fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  if (satisfyXAttr != null) {
+List<XAttr> xAttrs = Lists.newArrayListWithCapacity(1);
+xAttrs.add(satisfyXAttr);
+fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  }
 } finally {
   fsd.writeUnlock();
 }
@@ -79,16 +81,19 @@ final class FSDirSatisfyStoragePolicyOp {
 
 // TODO: think about optimization here, label the dir instead
 // of the sub-files of the dir.
-if (inode.isFile()) {
+if (inode.isFile() && inode.asFile().numBlocks() != 0) {
   candidateNodes.add(inode);
 } else if (inode.isDirectory()) {
   for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
+if (node.isFile() && node.asFile().numBlocks() != 0) {
   candidateNodes.add(node);
 }
   }
 }
 
+if (candidateNodes.isEmpty()) {
+  return null;
+}
 // If node has satisfy xattr, then stop adding it
 // to satisfy movement queue.
 if (inodeHasSatisfyXAttr(candidateNodes)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/94c1f0f4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index fa954b8..8e08a1e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -907,6 +907,38 @@ public class TestStoragePolicySatisfier {
 }
   }
 
+  /**
+   * Test SPS with empty file.
+   * 1. Create one empty file.
+   * 2. Call satisfyStoragePolicy for empty file.
+   * 3. SPS should skip this file and no xattr should be added for the empty file.
+   */
+  @Test(timeout = 30)
+  public void testSPSWhenFileLengthIsZero() throws Exception {
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path filePath = new Path("/zeroSizeFile");
+  DFSTestUtil.createFile(fs, filePath, 0, (short) 1, 0);
+  FSEditLog editlog = cluster.getNameNode().getNamesystem().getEditLog();
+  long lastWrittenTxId = 

[43/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/343d9cbd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
deleted file mode 100644
index 9f733ff..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ /dev/null
@@ -1,1775 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-import static org.junit.Assert.fail;
-import static org.slf4j.LoggerFactory.getLogger;
-
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Set;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.ReconfigurationException;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
-import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
-import org.junit.Assert;
-import org.junit.Test;
-import org.mockito.Mockito;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.slf4j.event.Level;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests that StoragePolicySatisfier daemon is able to check the blocks to be
- * moved and finding its suggested target locations to move.
- */
-public class TestStoragePolicySatisfier {
-
-  {
-GenericTestUtils.setLogLevel(
-getLogger(FSTreeTraverser.class), Level.DEBUG);
-  }
-
-  private static final String ONE_SSD = "ONE_SSD";
-  private static final String COLD = "COLD";
-  private static final Logger LOG =
-  LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
-  private final Configuration config = new HdfsConfiguration();
-  private StorageType[][] allDiskTypes =
-  new StorageType[][]{{StorageType.DISK, StorageType.DISK},
-  {StorageType.DISK, StorageType.DISK},
-  {StorageType.DISK, StorageType.DISK}};
-  private MiniDFSCluster hdfsCluster = null;
-  final private int numOfDatanodes = 3;
-  

[29/50] [abbrv] hadoop git commit: HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/574f6266
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/574f6266
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/574f6266

Branch: refs/heads/HDFS-10285
Commit: 574f62668ab8ef2c132dfa5543fa4ea4787b22bd
Parents: eacd760
Author: Uma Maheswara Rao G 
Authored: Thu Aug 17 13:21:07 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:22:59 2018 +0530

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../server/blockmanagement/BlockManager.java| 104 +++
 .../BlockStorageMovementAttemptedItems.java |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  20 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  22 ++--
 .../server/namenode/StoragePolicySatisfier.java |  20 ++--
 .../protocol/BlocksStorageMovementResult.java   |   2 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  11 +-
 .../src/main/resources/hdfs-default.xml |  10 +-
 .../src/site/markdown/ArchivalStorage.md|  14 +--
 .../src/site/markdown/HDFSCommands.md   |   2 +-
 .../TestStoragePolicySatisfyWorker.java |   2 +-
 .../hadoop/hdfs/server/mover/TestMover.java |  22 ++--
 .../hdfs/server/mover/TestStorageMover.java |   2 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../namenode/TestNameNodeReconfigure.java   |  99 --
 .../TestPersistentStoragePolicySatisfier.java   |   6 +-
 .../namenode/TestStoragePolicySatisfier.java|  35 +--
 .../TestStoragePolicySatisfierWithHA.java   |  10 +-
 ...stStoragePolicySatisfierWithStripedFile.java |   8 ++
 .../hdfs/tools/TestStoragePolicyCommands.java   |  21 ++--
 22 files changed, 265 insertions(+), 161 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/574f6266/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 38be348b..bc6e7a4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -62,7 +62,7 @@ function hadoop_usage
   hadoop_add_subcommand "portmap" daemon "run a portmap service"
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary 
namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a 
directory or diff the current directory contents with a snapshot"
-  hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage 
policies"
+  hadoop_add_subcommand "storagepolicies" admin 
"list/get/set/satisfyStoragePolicy block storage policies"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/574f6266/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 9551baf..828c354 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -598,10 +598,10 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final intDFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; 
// One minute
 
   // SPS related configurations
-  public static final String  DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
-  "dfs.storage.policy.satisfier.activate";
-  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
-  true;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY =
+  "dfs.storage.policy.satisfier.enabled";
+  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
+  false;
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =
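For reference, enabling the satisfier under the renamed key looks like this; a minimal sketch, assuming hadoop-client is on the classpath (the feature is off by default after this rename, so deployments must opt in explicitly):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class EnableSps {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Opt in explicitly: the default is now false.
    conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY, true);
    System.out.println(
        conf.getBoolean("dfs.storage.policy.satisfier.enabled", false));
  }
}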


[07/50] [abbrv] hadoop git commit: HDFS-11293: [SPS]: Local DN should be given preference as source node, when target available in same node. Contributed by Yuanbo Liu and Uma Maheswara Rao G

2018-01-23 Thread rakeshr
HDFS-11293: [SPS]: Local DN should be given preference as source node, when 
target available in same node. Contributed by Yuanbo Liu and Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c58ca3af
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c58ca3af
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c58ca3af

Branch: refs/heads/HDFS-10285
Commit: c58ca3afbc9ff64d2778750f3bca15409652bcc4
Parents: 364729b
Author: Uma Maheswara Rao G 
Authored: Mon Jan 9 14:37:42 2017 -0800
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:10:16 2018 +0530

--
 .../server/namenode/StoragePolicySatisfier.java | 49 --
 .../namenode/TestStoragePolicySatisfier.java| 71 
 2 files changed, 113 insertions(+), 7 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c58ca3af/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index ee59617..b1b1464 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -298,9 +298,25 @@ public class StoragePolicySatisfier implements Runnable {
   new ArrayList();
   List existingBlockStorages =
   new ArrayList(Arrays.asList(storages));
+  // if the expected type already exists on a source node, a local movement
+  // would be possible, so let's find such sources first.
+  Iterator iterator = 
existingBlockStorages.iterator();
+  while (iterator.hasNext()) {
+DatanodeStorageInfo datanodeStorageInfo = iterator.next();
+if (checkSourceAndTargetTypeExists(
+datanodeStorageInfo.getDatanodeDescriptor(), existing,
+expectedStorageTypes)) {
+  sourceWithStorageMap
+  .add(new 
StorageTypeNodePair(datanodeStorageInfo.getStorageType(),
+  datanodeStorageInfo.getDatanodeDescriptor()));
+  iterator.remove();
+  existing.remove(datanodeStorageInfo.getStorageType());
+}
+  }
+
+  // Let's find sources for the remaining existing types.
   for (StorageType existingType : existing) {
-Iterator iterator =
-existingBlockStorages.iterator();
+iterator = existingBlockStorages.iterator();
 while (iterator.hasNext()) {
   DatanodeStorageInfo datanodeStorageInfo = iterator.next();
   StorageType storageType = datanodeStorageInfo.getStorageType();
@@ -317,7 +333,7 @@ public class StoragePolicySatisfier implements Runnable {
   findTargetsForExpectedStorageTypes(expectedStorageTypes);
 
   foundMatchingTargetNodesForBlock |= findSourceAndTargetToMove(
-  blockMovingInfos, blockInfo, existing, sourceWithStorageMap,
+  blockMovingInfos, blockInfo, sourceWithStorageMap,
   expectedStorageTypes, locsForExpectedStorageTypes);
 }
 return foundMatchingTargetNodesForBlock;
@@ -366,8 +382,6 @@ public class StoragePolicySatisfier implements Runnable {
*  - list of block source and target node pair
* @param blockInfo
*  - Block
-   * @param existing
-   *  - Existing storage types of block
* @param sourceWithStorageList
*  - Source Datanode with storages list
* @param expected
@@ -379,7 +393,6 @@ public class StoragePolicySatisfier implements Runnable {
*/
   private boolean findSourceAndTargetToMove(
   List blockMovingInfos, BlockInfo blockInfo,
-  List existing,
   List sourceWithStorageList,
   List expected,
   StorageTypeNodeMap locsForExpectedStorageTypes) {
@@ -403,6 +416,7 @@ public class StoragePolicySatisfier implements Runnable {
 targetNodes.add(chosenTarget.dn);
 targetStorageTypes.add(chosenTarget.storageType);
 chosenNodes.add(chosenTarget.dn);
+expected.remove(chosenTarget.storageType);
 // TODO: We can increment scheduled block count for this node?
   }
 }
@@ -442,16 +456,20 @@ public class StoragePolicySatisfier implements Runnable {
 targetNodes.add(chosenTarget.dn);
 targetStorageTypes.add(chosenTarget.storageType);
 chosenNodes.add(chosenTarget.dn);
+expected.remove(chosenTarget.storageType);
 // TODO: We can increment 
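To make the two-pass idea in the hunk above concrete, here is a standalone sketch with hypothetical types (not the HDFS classes): first pick the nodes that can satisfy the move locally because they already hold the replica and offer an expected target type, then fall back to the remaining replica holders as remote-move sources.

import java.util.ArrayList;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

class LocalFirstSelector {
  static <N> List<N> pickSources(List<N> nodesWithReplica,
      Map<N, Set<String>> storageTypesOnNode, Set<String> expectedTypes) {
    List<N> sources = new ArrayList<>();
    Iterator<N> it = nodesWithReplica.iterator();
    // Pass 1: prefer nodes where source replica and target type co-exist,
    // so the block can move between storages on the same node.
    while (it.hasNext()) {
      N node = it.next();
      Set<String> types = storageTypesOnNode.get(node);
      if (types != null && !Collections.disjoint(types, expectedTypes)) {
        sources.add(node);
        it.remove();
      }
    }
    // Pass 2: whatever replica holders are left become remote-move sources.
    sources.addAll(nodesWithReplica);
    return sources;
  }
}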

[35/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block 
storage movements. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8fb4a3d8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8fb4a3d8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8fb4a3d8

Branch: refs/heads/HDFS-10285
Commit: 8fb4a3d84d825502fd776033a955ed928f0fc651
Parents: a2808be
Author: Uma Maheswara Rao G 
Authored: Thu Oct 12 17:17:51 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:23:17 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../DatanodeProtocolClientSideTranslatorPB.java |  12 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 150 +++-
 .../blockmanagement/DatanodeDescriptor.java |  50 ++-
 .../server/blockmanagement/DatanodeManager.java | 104 --
 .../hdfs/server/datanode/BPOfferService.java|   3 +-
 .../hdfs/server/datanode/BPServiceActor.java|  33 +-
 .../datanode/BlockStorageMovementTracker.java   |  80 ++---
 .../datanode/StoragePolicySatisfyWorker.java| 214 
 .../BlockStorageMovementAttemptedItems.java | 299 
 .../BlockStorageMovementInfosBatch.java |  61 
 .../hdfs/server/namenode/FSNamesystem.java  |  11 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +-
 .../server/namenode/StoragePolicySatisfier.java | 343 ++-
 .../protocol/BlockStorageMovementCommand.java   |  99 ++
 .../BlocksStorageMoveAttemptFinished.java   |  48 +++
 .../protocol/BlocksStorageMovementResult.java   |  74 
 .../hdfs/server/protocol/DatanodeProtocol.java  |   5 +-
 .../src/main/proto/DatanodeProtocol.proto   |  30 +-
 .../src/main/resources/hdfs-default.xml |  21 +-
 .../src/site/markdown/ArchivalStorage.md|   6 +-
 .../TestNameNodePrunesMissingStorages.java  |   5 +-
 .../datanode/InternalDataNodeTestUtils.java |   4 +-
 .../server/datanode/TestBPOfferService.java |   4 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   6 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../TestStoragePolicySatisfyWorker.java |  52 ++-
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   6 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   4 +-
 .../TestBlockStorageMovementAttemptedItems.java | 145 
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 .../namenode/TestStoragePolicySatisfier.java| 115 ++-
 ...stStoragePolicySatisfierWithStripedFile.java |  20 +-
 37 files changed, 908 insertions(+), 1135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8fb4a3d8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 38244fd..2b6aa46 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -613,11 +613,15 @@ public class DFSConfigKeys extends 
CommonConfigurationKeys {
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =
-  5 * 60 * 1000;
+  1 * 60 * 1000;
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.self.retry.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT =
-  20 * 60 * 1000;
+  5 * 60 * 1000;
+  public static final String 
DFS_STORAGE_POLICY_SATISFIER_SHARE_EQUAL_REPLICA_MAX_STREAMS_KEY =
+  "dfs.storage.policy.satisfier.low.max-streams.preference";
+  public static final boolean 
DFS_STORAGE_POLICY_SATISFIER_SHARE_EQUAL_REPLICA_MAX_STREAMS_DEFAULT =
+  false;
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = 
"dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;


[03/50] [abbrv] hadoop git commit: HDFS-11032: [SPS]: Handling of block movement failure at the coordinator datanode. Contributed by Rakesh R

2018-01-23 Thread rakeshr
HDFS-11032: [SPS]: Handling of block movement failure at the coordinator 
datanode. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d648d35f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d648d35f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d648d35f

Branch: refs/heads/HDFS-10285
Commit: d648d35f22275a605699e0b2927ffa31d569f011
Parents: ab9525f
Author: Uma Maheswara Rao G 
Authored: Thu Dec 22 17:07:49 2016 -0800
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:10:03 2018 +0530

--
 .../datanode/StoragePolicySatisfyWorker.java|   9 +-
 .../namenode/TestStoragePolicySatisfier.java| 168 +++
 2 files changed, 143 insertions(+), 34 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d648d35f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index a69a38b..19f3fe2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.datatransfer.BlockPinningException;
 import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
 import org.apache.hadoop.hdfs.protocol.datatransfer.IOStreamPair;
 import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
@@ -251,6 +252,12 @@ public class StoragePolicySatisfyWorker {
 + " satisfying storageType:{}",
 block, source, target, targetStorageType);
 return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
+  } catch (BlockPinningException e) {
+// A pinned block can't be moved to a different node, so there is no
+// point in retrying; just mark it as SUCCESS.
+LOG.debug("Pinned block can't be moved, so skipping block:{}", block,
+e);
+return BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS;
   } catch (IOException e) {
 // TODO: handle failure retries
 LOG.warn(
@@ -282,7 +289,7 @@ public class StoragePolicySatisfyWorker {
 response = BlockOpResponseProto.parseFrom(vintPrefixed(in));
   }
   String logInfo = "reportedBlock move is failed";
-  DataTransferProtoUtil.checkBlockOpStatus(response, logInfo);
+  DataTransferProtoUtil.checkBlockOpStatus(response, logInfo, true);
 }
   }
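To isolate the decision the hunk above encodes, a standalone sketch (the exception and enum names mirror the fragment, but this is not the HDFS class): pinned blocks can never leave their node, so reporting SUCCESS takes them off the retry path for good, while other I/O failures remain eligible for retry handling.

class BlockPinningException extends java.io.IOException {
}

enum MoveStatus { SUCCESS, FAILURE }

class MoveResultClassifier {
  static MoveStatus classify(Exception e) {
    if (e instanceof BlockPinningException) {
      return MoveStatus.SUCCESS; // skip permanently, do not retry
    }
    return MoveStatus.FAILURE;   // eligible for retry handling
  }
}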
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d648d35f/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index fe23f3e..179b66b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -21,6 +21,7 @@ import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KE
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
+import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.TimeoutException;
@@ -36,12 +37,15 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import 

[38/50] [abbrv] hadoop git commit: HDFS-12790: [SPS]: Rebasing HDFS-10285 branch after HDFS-10467, HDFS-12599 and HDFS-11968 commits. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
HDFS-12790: [SPS]: Rebasing HDFS-10285 branch after HDFS-10467, HDFS-12599 and 
HDFS-11968 commits. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d0da51a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d0da51a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d0da51a

Branch: refs/heads/HDFS-10285
Commit: 8d0da51adbccd2c0596b0ca89b90c90657c656a3
Parents: 3162832
Author: Rakesh Radhakrishnan 
Authored: Fri Nov 10 10:06:43 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:32:18 2018 +0530

--
 .../federation/router/RouterRpcServer.java  |  19 +++
 .../namenode/TestStoragePolicySatisfier.java|   9 +-
 ...stStoragePolicySatisfierWithStripedFile.java |  21 +--
 .../hdfs/tools/TestStoragePolicyCommands.java   |  57 -
 .../TestStoragePolicySatisfyAdminCommands.java  | 127 +++
 5 files changed, 162 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d0da51a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 9afd441..52d3e23 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -86,6 +86,7 @@ import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
@@ -2148,4 +2149,22 @@ public class RouterRpcServer extends AbstractService 
implements ClientProtocol {
   public Quota getQuotaModule() {
 return this.quotaCall;
   }
+
+  @Override
+  public void satisfyStoragePolicy(String path) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public boolean isStoragePolicySatisfierRunning() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return false;
+  }
+
+  @Override
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+  String path) throws IOException {
+checkOperation(OperationCategory.READ, false);
+return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
+  }
 }
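For context, a client reaches these RPCs through DistributedFileSystem rather than calling ClientProtocol directly; a minimal sketch, assuming a cluster at hdfs://localhost:9000 (error handling omitted, and the call is asynchronous):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class SatisfyPolicyClient {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    try (FileSystem fs =
        FileSystem.get(URI.create("hdfs://localhost:9000"), conf)) {
      DistributedFileSystem dfs = (DistributedFileSystem) fs;
      // Ask the NameNode to schedule block moves so /data matches its
      // storage policy.
      dfs.satisfyStoragePolicy(new Path("/data"));
    }
  }
}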

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d0da51a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index f42d911..edd1aca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -61,6 +61,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Assert;
@@ -912,8 +913,6 @@ public class TestStoragePolicySatisfier {
 
 int defaultStripedBlockSize =
 StripedFileTestUtil.getDefaultECPolicy().getCellSize() * 4;
-config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-StripedFileTestUtil.getDefaultECPolicy().getName());
 config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
 config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
 

[13/50] [abbrv] hadoop git commit: HDFS-11336: [SPS]: Remove xAttrs when movements done or SPS disabled. Contributed by Yuanbo Liu.

2018-01-23 Thread rakeshr
HDFS-11336: [SPS]: Remove xAttrs when movements done or SPS disabled. 
Contributed by Yuanbo Liu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/46d56ba9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/46d56ba9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/46d56ba9

Branch: refs/heads/HDFS-10285
Commit: 46d56ba9895d7c4a828b014d8860378a1109430a
Parents: 1fb3622
Author: Uma Maheswara Rao G 
Authored: Tue Mar 14 00:52:24 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:13:01 2018 +0530

--
 .../BlockStorageMovementAttemptedItems.java |  14 ++-
 .../hdfs/server/namenode/FSDirAttrOp.java   |   8 ++
 .../hdfs/server/namenode/FSDirectory.java   |  16 +++
 .../server/namenode/StoragePolicySatisfier.java |  45 ++--
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |   2 +-
 .../TestBlockStorageMovementAttemptedItems.java |   6 +-
 .../TestPersistentStoragePolicySatisfier.java   | 112 ++-
 7 files changed, 186 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/46d56ba9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index 042aca3..f15db73 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import static org.apache.hadoop.util.Time.monotonicNow;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -54,6 +55,7 @@ public class BlockStorageMovementAttemptedItems {
   private final List 
storageMovementAttemptedResults;
   private volatile boolean monitorRunning = true;
   private Daemon timerThread = null;
+  private final StoragePolicySatisfier sps;
   //
   // It might take anywhere between 30 to 60 minutes before
   // a request is timed out.
@@ -69,7 +71,8 @@ public class BlockStorageMovementAttemptedItems {
 
   public BlockStorageMovementAttemptedItems(long recheckTimeout,
   long selfRetryTimeout,
-  BlockStorageMovementNeeded unsatisfiedStorageMovementFiles) {
+  BlockStorageMovementNeeded unsatisfiedStorageMovementFiles,
+  StoragePolicySatisfier sps) {
 if (recheckTimeout > 0) {
   this.minCheckTimeout = Math.min(minCheckTimeout, recheckTimeout);
 }
@@ -78,6 +81,7 @@ public class BlockStorageMovementAttemptedItems {
 this.blockStorageMovementNeeded = unsatisfiedStorageMovementFiles;
 storageMovementAttemptedItems = new HashMap<>();
 storageMovementAttemptedResults = new ArrayList<>();
+this.sps = sps;
   }
 
   /**
@@ -200,6 +204,9 @@ public class BlockStorageMovementAttemptedItems {
 } catch (InterruptedException ie) {
   LOG.info("BlocksStorageMovementAttemptResultMonitor thread "
   + "is interrupted.", ie);
+} catch (IOException ie) {
+  LOG.warn("BlocksStorageMovementAttemptResultMonitor thread "
+  + "received exception and exiting.", ie);
 }
   }
 }
@@ -248,7 +255,7 @@ public class BlockStorageMovementAttemptedItems {
   }
 
   @VisibleForTesting
-  void blockStorageMovementResultCheck() {
+  void blockStorageMovementResultCheck() throws IOException {
 synchronized (storageMovementAttemptedResults) {
   Iterator resultsIter =
   storageMovementAttemptedResults.iterator();
@@ -296,6 +303,9 @@ public class BlockStorageMovementAttemptedItems {
   + " reported from co-ordinating datanode. But the trackID "
   + "doesn't exists in storageMovementAttemptedItems list",
   storageMovementAttemptedResult.getTrackId());
+  // Remove xattr for the track id.
+  this.sps.notifyBlkStorageMovementFinished(
+  storageMovementAttemptedResult.getTrackId());
 }
   }
   // Remove trackID from the attempted list, if any.
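The cleanup itself is not shown in this fragment; conceptually, once all moves tracked under an id are finished (or SPS is disabled), the satisfier marker xattr is stripped so the file is no longer re-queued after a restart. A hypothetical sketch of that idea (names invented for illustration, not the HDFS-11336 code):

import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;

class SatisfyMarkerTracker {
  private static final String SATISFY_XATTR = "user.satisfy.storage.policy";
  private final Map<Long, Set<String>> xattrsByFileId = new ConcurrentHashMap<>();

  void markPending(long fileId, Set<String> fileXAttrs) {
    fileXAttrs.add(SATISFY_XATTR);
    xattrsByFileId.put(fileId, fileXAttrs);
  }

  // Invoked when every scheduled block move for the file has been reported
  // finished, or when SPS is disabled: drop the marker so the file is no
  // longer tracked.
  void notifyMovementFinished(long fileId) {
    Set<String> fileXAttrs = xattrsByFileId.remove(fileId);
    if (fileXAttrs != null) {
      fileXAttrs.remove(SATISFY_XATTR);
    }
  }
}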

http://git-wip-us.apache.org/repos/asf/hadoop/blob/46d56ba9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 

[30/50] [abbrv] hadoop git commit: HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. Contributed by Surendra Singh Lilhore.

2018-01-23 Thread rakeshr
HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/44f0659b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/44f0659b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/44f0659b

Branch: refs/heads/HDFS-10285
Commit: 44f0659b61af9d66c3fb03ec9a18c41c35636ea0
Parents: 574f626
Author: Uma Maheswara Rao G 
Authored: Wed Aug 23 15:37:03 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 11:23:06 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  21 +-
 .../server/blockmanagement/DatanodeManager.java |  14 +-
 .../hdfs/server/datanode/BPOfferService.java|   1 +
 .../BlockStorageMovementAttemptedItems.java |  95 +---
 .../namenode/BlockStorageMovementNeeded.java| 233 ++-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  91 +++-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  11 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../server/namenode/StoragePolicySatisfier.java | 108 ++---
 .../TestStoragePolicySatisfyWorker.java |   5 +-
 .../TestBlockStorageMovementAttemptedItems.java |  34 +--
 .../TestPersistentStoragePolicySatisfier.java   | 104 +
 .../namenode/TestStoragePolicySatisfier.java| 127 +-
 14 files changed, 589 insertions(+), 259 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/44f0659b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 0f49f40..4443b26 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,7 +89,6 @@ import 
org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementNeeded;
 import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -438,9 +437,6 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private final BlockStorageMovementNeeded storageMovementNeeded =
-  new BlockStorageMovementNeeded();
-
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -486,8 +482,7 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
-conf);
+sps = new StoragePolicySatisfier(namesystem, this, conf);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5010,20 +5005,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Set file block collection for which storage movement needed for its 
blocks.
-   *
-   * @param id
-   *  - file block collection id.
-   */
-  public void satisfyStoragePolicy(long id) {
-storageMovementNeeded.add(id);
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Added block collection id {} to block "
-  + "storageMovementNeeded queue", id);
-}
-  }
-
-  /**
* Gets the storage policy satisfier instance.
*
* @return sps

http://git-wip-us.apache.org/repos/asf/hadoop/blob/44f0659b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 

[48/50] [abbrv] hadoop git commit: HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for external/internal implementations. Contributed by Uma Maheswara Rao G

2018-01-23 Thread rakeshr
HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for 
external/internal implementations. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3e280b90
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3e280b90
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3e280b90

Branch: refs/heads/HDFS-10285
Commit: 3e280b905aa681aa07a655474155733c2ce5f6fe
Parents: bcce97f
Author: Rakesh Radhakrishnan 
Authored: Fri Jan 19 08:51:49 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Wed Jan 24 12:27:37 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  61 +-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  16 +-
 .../hdfs/server/namenode/FSDirectory.java   |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  10 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |  44 
 .../namenode/sps/BlockMovementListener.java |  40 
 .../sps/BlockStorageMovementAttemptedItems.java |  28 +--
 .../sps/BlockStorageMovementNeeded.java | 207 ---
 .../hdfs/server/namenode/sps/Context.java   |  43 ++--
 .../server/namenode/sps/FileIdCollector.java|  43 
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |  62 ++
 .../namenode/sps/IntraSPSNameNodeContext.java   |  62 ++
 .../sps/IntraSPSNameNodeFileIdCollector.java| 178 
 .../hdfs/server/namenode/sps/ItemInfo.java  |  81 
 .../hdfs/server/namenode/sps/SPSPathIds.java|  63 ++
 .../hdfs/server/namenode/sps/SPSService.java| 107 ++
 .../namenode/sps/StoragePolicySatisfier.java| 175 +++-
 .../TestBlockStorageMovementAttemptedItems.java |  19 +-
 .../sps/TestStoragePolicySatisfier.java | 111 ++
 ...stStoragePolicySatisfierWithStripedFile.java |  19 +-
 20 files changed, 938 insertions(+), 437 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3e280b90/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index e97fb46..b8f49cb 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -93,8 +93,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.sps.Context;
-import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
+import org.apache.hadoop.hdfs.server.namenode.sps.SPSPathIds;
+import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -441,7 +441,8 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private Context spsctxt = null;
+  private final SPSPathIds spsPaths;
+
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -487,8 +488,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-spsctxt = new IntraSPSNameNodeContext(namesystem, this, conf);
-sps = new StoragePolicySatisfier(spsctxt);
+sps = new StoragePolicySatisfier(conf);
+spsPaths = new SPSPathIds();
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5034,8 +5035,7 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-// TODO: FSDirectory will get removed via HDFS-12911 modularization work
-sps.start(false, namesystem.getFSDirectory());
+sps.start(false);
   }
 
   /**
@@ -5071,8 +5071,7 @@ public class BlockManager 

[40/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-01-23 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/343d9cbd/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
new file mode 100644
index 000..c1a2b8b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -0,0 +1,580 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that the StoragePolicySatisfier daemon is able to check the striped
+ * blocks to be moved and find their expected target locations in order to
+ * satisfy the storage policy.
+ */
+public class TestStoragePolicySatisfierWithStripedFile {
+
+  private static final Logger LOG = LoggerFactory
+  .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
+
+  private final int stripesPerBlock = 2;
+
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocks;
+  private int cellSize;
+  private int defaultStripeBlockSize;
+
+  private ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
+  /**
+   * Initialize erasure coding policy.
+   */
+  @Before
+  public void init(){
+ecPolicy = getEcPolicy();
+dataBlocks = ecPolicy.getNumDataUnits();
+parityBlocks = ecPolicy.getNumParityUnits();
+cellSize = ecPolicy.getCellSize();
+defaultStripeBlockSize = cellSize * stripesPerBlock;
+  }
+
+  /**
+   * Tests to verify that all the striped blocks (data + parity blocks) are
+   * moved to satisfy the storage policy.
+   */
+  @Test(timeout = 30)
+  public void testMoverWithFullStripe() throws Exception {
+// start 10 datanodes
+int numOfDatanodes = 10;
+int storagesPerDatanode = 2;
+long capacity = 20 * defaultStripeBlockSize;
+long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+for (int i = 0; i < numOfDatanodes; i++) {
+  for (int j = 0; j < storagesPerDatanode; j++) {
+capacities[i][j] = capacity;
+  }
+}
+
+final Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+true);
+initConfWithStripe(conf, defaultStripeBlockSize);
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(numOfDatanodes)
+
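(The message is cut off mid-statement. The builder chain plausibly continues as below, assuming the MiniDFSCluster storage-type APIs; these are not the original lines. The sketch gives every DataNode one DISK and one ARCHIVE volume, then waits for the cluster to come up.)

StorageType[][] storageTypes = new StorageType[numOfDatanodes][];
for (int i = 0; i < numOfDatanodes; i++) {
  storageTypes[i] = new StorageType[]{StorageType.DISK, StorageType.ARCHIVE};
}
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
    .numDataNodes(numOfDatanodes)
    .storagesPerDatanode(storagesPerDatanode)
    .storageTypes(storageTypes)
    .storageCapacities(capacities)
    .build();
cluster.waitActive();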

[2/3] hadoop git commit: YARN-5473. Expose per-application over-allocation info in the Resource Manager. Contributed by Haibo Chen.

2018-01-23 Thread szegedim
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c95d31fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
index ed71ea2..0243443 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/records/impl/pb/ApplicationAttemptStateDataPBImpl.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl;
 import org.apache.hadoop.yarn.api.records.impl.pb.ProtoUtils;
@@ -56,8 +57,9 @@ public class ApplicationAttemptStateDataPBImpl extends
   private Container masterContainer = null;
   private ByteBuffer appAttemptTokens = null;
 
-  private Map resourceSecondsMap;
+  private Map guaranteedResourceSecondsMap;
   private Map preemptedResourceSecondsMap;
+  private Map opportunisticResourceSecondsMap;
 
   public ApplicationAttemptStateDataPBImpl() {
 builder = ApplicationAttemptStateDataProto.newBuilder();
@@ -243,30 +245,72 @@ public class ApplicationAttemptStateDataPBImpl extends
   }
 
   @Override
+  @Deprecated
   public long getMemorySeconds() {
-ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
-return p.getMemorySeconds();
+return getGuaranteedMemorySeconds();
   }
  
   @Override
+  @Deprecated
   public long getVcoreSeconds() {
-ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
-return p.getVcoreSeconds();
+return getGuaranteedVcoreSeconds();
   }
 
   @Override
+  @Deprecated
   public void setMemorySeconds(long memorySeconds) {
-maybeInitBuilder();
-builder.setMemorySeconds(memorySeconds);
+setGuaranteedMemorySeconds(memorySeconds);
   }
  
   @Override
+  @Deprecated
   public void setVcoreSeconds(long vcoreSeconds) {
+setGuaranteedVcoreSeconds(vcoreSeconds);
+  }
+
+  @Override
+  public long getGuaranteedMemorySeconds() {
+ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
+return p.getMemorySeconds();
+  }
+
+  @Override
+  public void setGuaranteedMemorySeconds(long memorySeconds) {
+maybeInitBuilder();
+builder.setMemorySeconds(memorySeconds);
+  }
+
+  @Override
+  public long getGuaranteedVcoreSeconds() {
+ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
+return p.getVcoreSeconds();
+  }
+
+  @Override
+  public void setGuaranteedVcoreSeconds(long vcoreSeconds) {
 maybeInitBuilder();
 builder.setVcoreSeconds(vcoreSeconds);
   }
 
   @Override
+  public long getOpportunisticMemorySeconds() {
+Map tmp = getOpportunisticResourceSecondsMap();
+if (tmp.containsKey(ResourceInformation.MEMORY_MB.getName())) {
+  return tmp.get(ResourceInformation.MEMORY_MB.getName());
+}
+return 0;
+  }
+
+  @Override
+  public long getOpportunisticVcoreSeconds() {
+Map tmp = getOpportunisticResourceSecondsMap();
+if (tmp.containsKey(ResourceInformation.VCORES.getName())) {
+  return tmp.get(ResourceInformation.VCORES.getName());
+}
+return 0;
+  }
+
+  @Override
   public long getPreemptedMemorySeconds() {
 ApplicationAttemptStateDataProtoOrBuilder p = viaProto ? proto : builder;
 return p.getPreemptedMemorySeconds();
@@ -410,21 +454,35 @@ public class ApplicationAttemptStateDataPBImpl extends
   }
 
   @Override
+  @Deprecated
   public Map getResourceSecondsMap() {
-if (this.resourceSecondsMap != null) {
-  return this.resourceSecondsMap;
+return getGuaranteedResourceSecondsMap();
+  }
+
+  @Override
+  @Deprecated
+  public void setResourceSecondsMap(Map resourceSecondsMap) {
+
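A short sketch of how a consumer reads the split metrics introduced here, assuming a default-constructed PB impl whose opportunistic resource-seconds map starts out empty (this demo is illustrative, not taken from the patch):

import org.apache.hadoop.yarn.server.resourcemanager.recovery.records.impl.pb.ApplicationAttemptStateDataPBImpl;

public class SplitMetricsDemo {
  public static void main(String[] args) {
    ApplicationAttemptStateDataPBImpl state =
        new ApplicationAttemptStateDataPBImpl();
    state.setGuaranteedMemorySeconds(1024L);
    state.setGuaranteedVcoreSeconds(2L);
    // The deprecated getter is now an alias for the guaranteed value.
    System.out.println(state.getMemorySeconds());              // 1024
    // Opportunistic usage reads from its own resource-seconds map and
    // defaults to 0 when the map has no entry yet.
    System.out.println(state.getOpportunisticMemorySeconds()); // 0
  }
}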

[3/3] hadoop git commit: YARN-5473. Expose per-application over-allocation info in the Resource Manager. Contributed by Haibo Chen.

2018-01-23 Thread szegedim
YARN-5473. Expose per-application over-allocation info in the Resource Manager. 
Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c95d31fd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c95d31fd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c95d31fd

Branch: refs/heads/YARN-1011
Commit: c95d31fd73ff4468d5eb7d761f738c7451e900ad
Parents: 43f51bc
Author: Miklos Szegedi 
Authored: Tue Jan 23 22:34:49 2018 -0800
Committer: Miklos Szegedi 
Committed: Tue Jan 23 22:34:49 2018 -0800

--
 .../apache/hadoop/mapreduce/TypeConverter.java  |   4 +-
 .../hadoop/mapreduce/TestTypeConverter.java |   4 +-
 .../hadoop/mapred/TestResourceMgrDelegate.java  |   2 +-
 .../records/ApplicationResourceUsageReport.java | 158 +++---
 .../src/main/proto/yarn_protos.proto|   2 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |  11 +-
 .../apache/hadoop/yarn/client/cli/TopCLI.java   |  18 +-
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  23 ++-
 .../ApplicationResourceUsageReportPBImpl.java   | 205 +++
 .../hadoop/yarn/util/resource/Resources.java|  20 ++
 ...pplicationHistoryManagerOnTimelineStore.java |  83 +---
 .../TestApplicationHistoryClientService.java|   8 +-
 ...pplicationHistoryManagerOnTimelineStore.java |  18 +-
 .../metrics/ApplicationMetricsConstants.java|  10 +-
 .../hadoop/yarn/server/utils/BuilderUtils.java  |  15 +-
 .../hadoop/yarn/server/webapp/dao/AppInfo.java  |  39 +++-
 .../server/resourcemanager/RMAppManager.java|  18 +-
 .../server/resourcemanager/RMServerUtils.java   |   3 +-
 .../metrics/TimelineServiceV1Publisher.java |  14 +-
 .../metrics/TimelineServiceV2Publisher.java |  14 +-
 .../resourcemanager/recovery/RMStateStore.java  |   5 +-
 .../records/ApplicationAttemptStateData.java| 144 +++--
 .../pb/ApplicationAttemptStateDataPBImpl.java   | 110 --
 .../server/resourcemanager/rmapp/RMAppImpl.java |  38 ++--
 .../resourcemanager/rmapp/RMAppMetrics.java |  38 ++--
 .../attempt/AggregateAppResourceUsage.java  |  51 +++--
 .../rmapp/attempt/RMAppAttemptImpl.java |  21 +-
 .../rmapp/attempt/RMAppAttemptMetrics.java  |  47 +++--
 .../ContainerResourceUsageReport.java   |  46 +
 .../rmcontainer/RMContainer.java|   7 +-
 .../rmcontainer/RMContainerImpl.java| 119 ---
 .../scheduler/AbstractYarnScheduler.java|   2 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  63 +++---
 .../scheduler/YarnScheduler.java|   2 +-
 .../scheduler/common/fica/FiCaSchedulerApp.java |   2 +-
 .../scheduler/fair/FSAppAttempt.java|   9 +-
 .../webapp/FairSchedulerAppsBlock.java  |   8 +
 .../resourcemanager/webapp/RMAppBlock.java  |   9 +-
 .../resourcemanager/webapp/RMAppsBlock.java |  10 +
 .../resourcemanager/webapp/dao/AppInfo.java |  78 +--
 .../yarn_server_resourcemanager_recovery.proto  |   1 +
 .../server/resourcemanager/TestAppManager.java  |  39 ++--
 .../resourcemanager/TestApplicationACLs.java|   4 +-
 .../resourcemanager/TestClientRMService.java|  45 ++--
 .../TestContainerResourceUsage.java | 184 ++---
 .../applicationsmanager/MockAsm.java|   4 +-
 .../metrics/TestSystemMetricsPublisher.java |  23 ++-
 .../TestSystemMetricsPublisherForV2.java|  12 +-
 .../recovery/RMStateStoreTestBase.java  |  12 +-
 .../recovery/TestZKRMStateStore.java|  40 ++--
 .../rmapp/TestRMAppTransitions.java |   6 +-
 .../attempt/TestRMAppAttemptTransitions.java|  32 +--
 .../capacity/TestCapacityScheduler.java |   5 +-
 .../TestCapacitySchedulerNodeLabelUpdate.java   |  20 +-
 .../resourcemanager/webapp/TestAppPage.java |   2 +-
 .../resourcemanager/webapp/TestRMWebApp.java|   3 +-
 .../webapp/TestRMWebAppFairScheduler.java   |   2 +-
 .../webapp/TestRMWebServicesApps.java   |   2 +-
 .../router/webapp/RouterWebServiceUtil.java |  14 +-
 .../router/webapp/TestRouterWebServiceUtil.java |  14 +-
 60 files changed, 1426 insertions(+), 516 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c95d31fd/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
 

[1/3] hadoop git commit: YARN-5473. Expose per-application over-allocation info in the Resource Manager. Contributed by Haibo Chen.

2018-01-23 Thread szegedim
Repository: hadoop
Updated Branches:
  refs/heads/YARN-1011 43f51bcdb -> c95d31fd7


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c95d31fd/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
index 3508ab4..ad3413f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java
@@ -23,17 +23,12 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
-import java.util.Map;
 
-import org.apache.commons.lang.time.DateUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.ContainerState;
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceInformation;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import 
org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
@@ -43,6 +38,7 @@ import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AggregateAppResourceUsage;
+import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerResourceUsageReport;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import 
org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
 import org.apache.log4j.Level;
@@ -84,13 +80,13 @@ public class TestContainerResourceUsage {
 
 RMAppMetrics rmAppMetrics = app0.getRMAppMetrics();
 Assert.assertTrue(
-"Before app submittion, memory seconds should have been 0 but was "
-  + rmAppMetrics.getMemorySeconds(),
-rmAppMetrics.getMemorySeconds() == 0);
+"Before app submission, memory seconds should have been 0 but was "
+  + rmAppMetrics.getGuaranteedMemorySeconds(),
+rmAppMetrics.getGuaranteedMemorySeconds() == 0);
 Assert.assertTrue(
 "Before app submission, vcore seconds should have been 0 but was "
-  + rmAppMetrics.getVcoreSeconds(),
-rmAppMetrics.getVcoreSeconds() == 0);
+  + rmAppMetrics.getGuaranteedVcoreSeconds(),
+rmAppMetrics.getGuaranteedVcoreSeconds() == 0);
 
 RMAppAttempt attempt0 = app0.getCurrentAppAttempt();
 
@@ -105,7 +101,8 @@ public class TestContainerResourceUsage {
 // Allow metrics to accumulate.
 int sleepInterval = 1000;
 int cumulativeSleepTime = 0;
-while (rmAppMetrics.getMemorySeconds() <= 0 && cumulativeSleepTime < 5000) 
{
+while (rmAppMetrics.getGuaranteedMemorySeconds() <= 0
+&& cumulativeSleepTime < 5000) {
   Thread.sleep(sleepInterval);
   cumulativeSleepTime += sleepInterval;
 }
@@ -113,27 +110,35 @@ public class TestContainerResourceUsage {
 rmAppMetrics = app0.getRMAppMetrics();
 Assert.assertTrue(
 "While app is running, memory seconds should be >0 but is "
-+ rmAppMetrics.getMemorySeconds(),
-rmAppMetrics.getMemorySeconds() > 0);
++ rmAppMetrics.getGuaranteedMemorySeconds(),
+rmAppMetrics.getGuaranteedMemorySeconds() > 0);
 Assert.assertTrue(
 "While app is running, vcore seconds should be >0 but is "
-+ rmAppMetrics.getVcoreSeconds(),
-rmAppMetrics.getVcoreSeconds() > 0);
++ rmAppMetrics.getGuaranteedVcoreSeconds(),
+rmAppMetrics.getGuaranteedVcoreSeconds() > 0);
 
 MockRM.finishAMAndVerifyAppState(app0, rm, nm, am0);
 
 AggregateAppResourceUsage ru = 
calculateContainerResourceMetrics(rmContainer);
 rmAppMetrics = app0.getRMAppMetrics();
 
-
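The rename from getMemorySeconds() to getGuaranteedMemorySeconds() (and the
vcore counterpart) splits guaranteed usage out from the new over-allocation
accounting. A condensed sketch of the polling idiom the test relies on, using
only accessors visible in this diff; app0 is assumed to be a submitted RMApp
fixture inside a test method declared to throw Exception:

    RMAppMetrics metrics = app0.getRMAppMetrics();
    int waitedMs = 0;
    while (metrics.getGuaranteedMemorySeconds() <= 0 && waitedMs < 5000) {
      Thread.sleep(1000);                  // let the scheduler accumulate usage
      waitedMs += 1000;
      metrics = app0.getRMAppMetrics();    // re-read the metrics snapshot
    }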

hadoop git commit: YARN-7796. Container-executor fails with segfault on certain OS configurations. Contributed by Gergo Repas.

2018-01-23 Thread szegedim
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 e4ad5f7f3 -> bf795560f


YARN-7796. Container-executor fails with segfault on certain OS configurations. 
Contributed by Gergo Repas.

The crash came from copy_file() keeping its 128 KB copy buffer on the stack,
which can overflow the thread stack under restrictive stack-size limits; the
change below moves the buffer to the heap and frees it on every exit path.

(cherry picked from commit e7642a3e6f540b4b56367babfbaf35ee6b3c7675)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf795560
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf795560
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf795560

Branch: refs/heads/branch-3.0
Commit: bf795560fb617bc55d59dccb4c5522f5f6d14b44
Parents: e4ad5f7
Author: Miklos Szegedi 
Authored: Tue Jan 23 21:07:05 2018 -0800
Committer: Miklos Szegedi 
Committed: Tue Jan 23 22:21:12 2018 -0800

--
 .../impl/container-executor.c   | 20 +++-
 1 file changed, 15 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf795560/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index 8419313..264d637 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -998,13 +998,20 @@ static int open_file_as_nm(const char* filename) {
 static int copy_file(int input, const char* in_filename,
 const char* out_filename, mode_t perm) {
   const int buffer_size = 128*1024;
-  char buffer[buffer_size];
+  char* buffer = malloc(buffer_size);
+  if (buffer == NULL) {
+fprintf(LOGFILE, "Failed to allocate buffer while copying file: %s -> %s",
+  in_filename, out_filename);
+fflush(LOGFILE);
+return -1;
+  }
 
   int out_fd = open(out_filename, O_WRONLY|O_CREAT|O_EXCL|O_NOFOLLOW, perm);
   if (out_fd == -1) {
 fprintf(LOGFILE, "Can't open %s for output - %s\n", out_filename,
 strerror(errno));
 fflush(LOGFILE);
+free(buffer);
 return -1;
   }
 
@@ -1014,15 +1021,18 @@ static int copy_file(int input, const char* in_filename,
 while (pos < len) {
   ssize_t write_result = write(out_fd, buffer + pos, len - pos);
   if (write_result <= 0) {
-   fprintf(LOGFILE, "Error writing to %s - %s\n", out_filename,
-   strerror(errno));
-   close(out_fd);
-   return -1;
+fprintf(LOGFILE, "Error writing to %s - %s\n", out_filename,
+  strerror(errno));
+close(out_fd);
+free(buffer);
+return -1;
   }
   pos += write_result;
 }
 len = read(input, buffer, buffer_size);
   }
+  free(buffer);
+
   if (len < 0) {
 fprintf(LOGFILE, "Failed to read file %s - %s\n", in_filename,
strerror(errno));





hadoop git commit: YARN-7796. Container-executor fails with segfault on certain OS configurations. Contributed by Gergo Repas.

2018-01-23 Thread szegedim
Repository: hadoop
Updated Branches:
  refs/heads/trunk 95743c672 -> e7642a3e6


YARN-7796. Container-executor fails with segfault on certain OS configurations. 
Contributed by Gergo Repas.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e7642a3e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e7642a3e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e7642a3e

Branch: refs/heads/trunk
Commit: e7642a3e6f540b4b56367babfbaf35ee6b3c7675
Parents: 95743c6
Author: Miklos Szegedi 
Authored: Tue Jan 23 21:07:05 2018 -0800
Committer: Miklos Szegedi 
Committed: Tue Jan 23 21:07:05 2018 -0800

--
 .../impl/container-executor.c   | 20 +++-
 1 file changed, 15 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e7642a3e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
index b0b8e76..5ce6a00 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/impl/container-executor.c
@@ -1008,13 +1008,20 @@ static int open_file_as_nm(const char* filename) {
 static int copy_file(int input, const char* in_filename,
 const char* out_filename, mode_t perm) {
   const int buffer_size = 128*1024;
-  char buffer[buffer_size];
+  char* buffer = malloc(buffer_size);
+  if (buffer == NULL) {
+fprintf(LOGFILE, "Failed to allocate buffer while copying file: %s -> %s",
+  in_filename, out_filename);
+fflush(LOGFILE);
+return -1;
+  }
 
   int out_fd = open(out_filename, O_WRONLY|O_CREAT|O_EXCL|O_NOFOLLOW, perm);
   if (out_fd == -1) {
 fprintf(LOGFILE, "Can't open %s for output - %s\n", out_filename,
 strerror(errno));
 fflush(LOGFILE);
+free(buffer);
 return -1;
   }
 
@@ -1024,15 +1031,18 @@ static int copy_file(int input, const char* in_filename,
 while (pos < len) {
   ssize_t write_result = write(out_fd, buffer + pos, len - pos);
   if (write_result <= 0) {
-   fprintf(LOGFILE, "Error writing to %s - %s\n", out_filename,
-   strerror(errno));
-   close(out_fd);
-   return -1;
+fprintf(LOGFILE, "Error writing to %s - %s\n", out_filename,
+  strerror(errno));
+close(out_fd);
+free(buffer);
+return -1;
   }
   pos += write_result;
 }
 len = read(input, buffer, buffer_size);
   }
+  free(buffer);
+
   if (len < 0) {
 fprintf(LOGFILE, "Failed to read file %s - %s\n", in_filename,
strerror(errno));





hadoop git commit: YARN-7783. Add validation step to ensure constraints are not violated due to the order in which a request is processed. (asuresh)

2018-01-23 Thread asuresh
Repository: hadoop
Updated Branches:
  refs/heads/YARN-6592 744046337 -> 6965000e0


YARN-7783. Add validation step to ensure constraints are not violated due to 
the order in which a request is processed. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6965000e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6965000e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6965000e

Branch: refs/heads/YARN-6592
Commit: 6965000e02ee1a6475738ea352fe129d44ddaaf7
Parents: 7440463
Author: Arun Suresh 
Authored: Tue Jan 23 08:15:58 2018 -0800
Committer: Arun Suresh 
Committed: Tue Jan 23 16:53:24 2018 -0800

--
 .../algorithm/DefaultPlacementAlgorithm.java| 117 +--
 .../constraint/TestPlacementProcessor.java  |  49 
 2 files changed, 154 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6965000e/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
index 9887749..cc4a878 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/algorithm/DefaultPlacementAlgorithm.java
@@ -22,6 +22,7 @@ import java.util.Iterator;
 import java.util.List;
 
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.AbstractYarnScheduler;
@@ -69,13 +70,9 @@ public class DefaultPlacementAlgorithm implements 
ConstraintPlacementAlgorithm {
   public boolean attemptPlacementOnNode(ApplicationId appId,
   SchedulingRequest schedulingRequest, SchedulerNode schedulerNode)
   throws InvalidAllocationTagsQueryException {
-int numAllocs = schedulingRequest.getResourceSizing().getNumAllocations();
-if (numAllocs > 0) {
-  if (PlacementConstraintsUtil.canSatisfyConstraints(appId,
-  schedulingRequest, schedulerNode,
-  constraintManager, tagsManager)) {
-return true;
-  }
+if (PlacementConstraintsUtil.canSatisfyConstraints(appId,
+schedulingRequest, schedulerNode, constraintManager, tagsManager)) {
+  return true;
 }
 return false;
   }
@@ -93,6 +90,9 @@ public class DefaultPlacementAlgorithm implements 
ConstraintPlacementAlgorithm {
 int rePlacementCount = RE_ATTEMPT_COUNT;
 while (rePlacementCount > 0) {
   doPlacement(requests, resp, allNodes, rejectedRequests);
+  // Double check if placement constraints are really satisfied
+  validatePlacement(requests.getApplicationId(), resp,
+  rejectedRequests);
   if (rejectedRequests.size() == 0 || rePlacementCount == 1) {
 break;
   }
@@ -122,9 +122,13 @@ public class DefaultPlacementAlgorithm implements 
ConstraintPlacementAlgorithm {
 break;
   }
   SchedulingRequest schedulingRequest = requestIterator.next();
+  PlacedSchedulingRequest placedReq =
+  new PlacedSchedulingRequest(schedulingRequest);
+  placedReq.setPlacementAttempt(requests.getPlacementAttempt());
   CircularIterator nodeIter =
   new CircularIterator(lastSatisfiedNode, nIter, allNodes);
-  int numAllocs = 
schedulingRequest.getResourceSizing().getNumAllocations();
+  int numAllocs =
+  schedulingRequest.getResourceSizing().getNumAllocations();
   while (nodeIter.hasNext() && numAllocs > 0) {
 SchedulerNode node = nodeIter.next();
 try {
@@ -135,9 +139,6 @@ public class DefaultPlacementAlgorithm implements 
ConstraintPlacementAlgorithm {
   requests.getApplicationId(), schedulingRequest, node)) {
 schedulingRequest.getResourceSizing()
 

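Distilled from the hunks above, placement now runs a bounded
place-then-validate loop. The sketch keeps only names visible in the diff and
elides how rejected requests are fed back into the next round:

    int rePlacementCount = RE_ATTEMPT_COUNT;
    while (rePlacementCount > 0) {
      doPlacement(requests, resp, allNodes, rejectedRequests);
      // Re-check every tentative placement: a request processed earlier in
      // the round can invalidate a constraint that appeared satisfied later.
      validatePlacement(requests.getApplicationId(), resp, rejectedRequests);
      if (rejectedRequests.size() == 0 || rePlacementCount == 1) {
        break;
      }
      rePlacementCount--;   // rejected requests get another placement round
    }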
hadoop git commit: HDFS-12772. RBF: Federation Router State State Store internal API. Contributed by Inigo Goiri.

2018-01-23 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 a80c2dbab -> 1534936ac


HDFS-12772. RBF: Federation Router State State Store internal API. Contributed 
by Inigo Goiri.

(cherry picked from commit 95743c672e6b42b227a22dfa7cc16edc7bdb58bb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1534936a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1534936a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1534936a

Branch: refs/heads/branch-2
Commit: 1534936ac1757387934525b350179271fdf5b982
Parents: a80c2dba
Author: Inigo Goiri 
Authored: Tue Jan 23 19:15:44 2018 -0800
Committer: Inigo Goiri 
Committed: Tue Jan 23 19:17:22 2018 -0800

--
 .../federation/router/FederationUtil.java   |  20 ++
 .../federation/router/RouterServiceState.java   |  31 +++
 .../server/federation/store/RouterStore.java|  78 +++
 .../federation/store/impl/RouterStoreImpl.java  |  93 +
 .../protocol/GetRouterRegistrationRequest.java  |  47 +
 .../protocol/GetRouterRegistrationResponse.java |  44 
 .../protocol/GetRouterRegistrationsRequest.java |  34 
 .../GetRouterRegistrationsResponse.java |  55 +
 .../store/protocol/RouterHeartbeatRequest.java  |  50 +
 .../store/protocol/RouterHeartbeatResponse.java |  49 +
 .../pb/GetRouterRegistrationRequestPBImpl.java  |  70 +++
 .../pb/GetRouterRegistrationResponsePBImpl.java |  79 
 .../pb/GetRouterRegistrationsRequestPBImpl.java |  61 ++
 .../GetRouterRegistrationsResponsePBImpl.java   | 102 ++
 .../impl/pb/RouterHeartbeatRequestPBImpl.java   |  78 +++
 .../impl/pb/RouterHeartbeatResponsePBImpl.java  |  70 +++
 .../federation/store/records/RouterState.java   | 169 
 .../store/records/StateStoreVersion.java|  91 +
 .../records/impl/pb/RouterStatePBImpl.java  | 202 +++
 .../impl/pb/StateStoreVersionPBImpl.java|  79 
 .../src/main/proto/FederationProtocol.proto |  44 
 .../store/driver/TestStateStoreDriverBase.java  |  13 ++
 .../store/records/TestRouterState.java  |  85 
 23 files changed, 1644 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1534936a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
index 224cac1..3f736d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.util.VersionInfo;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -106,6 +107,25 @@ public final class FederationUtil {
   }
 
   /**
+   * Fetch the compile timestamp for this jar.
+   *
+   * @return Date compiled.
+   */
+  public static String getBuildVersion() {
+return VersionInfo.getBuildVersion();
+  }
+
+  /**
+   * Fetch the build/compile information for this jar.
+   *
+   * @return String Compilation info.
+   */
+  public static String getCompileInfo() {
+return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from "
++ VersionInfo.getBranch();
+  }
+
+  /**
* Create an instance of an interface with a constructor using a context.
*
* @param conf Configuration for the class names.

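Both new helpers simply surface build metadata from
org.apache.hadoop.util.VersionInfo. A one-line illustration of what a router
could log with them; the LOG field is an assumption, not part of the patch:

    LOG.info("Router build " + FederationUtil.getBuildVersion()
        + " (" + FederationUtil.getCompileInfo() + ")");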
http://git-wip-us.apache.org/repos/asf/hadoop/blob/1534936a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
new file mode 100644
index 000..25a6466
--- /dev/null
+++ 

hadoop git commit: HDFS-12772. RBF: Federation Router State State Store internal API. Contributed by Inigo Goiri.

2018-01-23 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 6da08837d -> a43100ada


HDFS-12772. RBF: Federation Router State State Store internal API. Contributed 
by Inigo Goiri.

(cherry picked from commit 95743c672e6b42b227a22dfa7cc16edc7bdb58bb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a43100ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a43100ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a43100ad

Branch: refs/heads/branch-2.9
Commit: a43100adad6abec5a46c13dc7212a25afc2f0ac8
Parents: 6da0883
Author: Inigo Goiri 
Authored: Tue Jan 23 19:15:44 2018 -0800
Committer: Inigo Goiri 
Committed: Tue Jan 23 19:17:47 2018 -0800

--
 .../federation/router/FederationUtil.java   |  20 ++
 .../federation/router/RouterServiceState.java   |  31 +++
 .../server/federation/store/RouterStore.java|  78 +++
 .../federation/store/impl/RouterStoreImpl.java  |  93 +
 .../protocol/GetRouterRegistrationRequest.java  |  47 +
 .../protocol/GetRouterRegistrationResponse.java |  44 
 .../protocol/GetRouterRegistrationsRequest.java |  34 
 .../GetRouterRegistrationsResponse.java |  55 +
 .../store/protocol/RouterHeartbeatRequest.java  |  50 +
 .../store/protocol/RouterHeartbeatResponse.java |  49 +
 .../pb/GetRouterRegistrationRequestPBImpl.java  |  70 +++
 .../pb/GetRouterRegistrationResponsePBImpl.java |  79 
 .../pb/GetRouterRegistrationsRequestPBImpl.java |  61 ++
 .../GetRouterRegistrationsResponsePBImpl.java   | 102 ++
 .../impl/pb/RouterHeartbeatRequestPBImpl.java   |  78 +++
 .../impl/pb/RouterHeartbeatResponsePBImpl.java  |  70 +++
 .../federation/store/records/RouterState.java   | 169 
 .../store/records/StateStoreVersion.java|  91 +
 .../records/impl/pb/RouterStatePBImpl.java  | 202 +++
 .../impl/pb/StateStoreVersionPBImpl.java|  79 
 .../src/main/proto/FederationProtocol.proto |  44 
 .../store/driver/TestStateStoreDriverBase.java  |  13 ++
 .../store/records/TestRouterState.java  |  85 
 23 files changed, 1644 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a43100ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
index 224cac1..3f736d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.util.VersionInfo;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -106,6 +107,25 @@ public final class FederationUtil {
   }
 
   /**
+   * Fetch the compile timestamp for this jar.
+   *
+   * @return Date compiled.
+   */
+  public static String getBuildVersion() {
+return VersionInfo.getBuildVersion();
+  }
+
+  /**
+   * Fetch the build/compile information for this jar.
+   *
+   * @return String Compilation info.
+   */
+  public static String getCompileInfo() {
+return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from "
++ VersionInfo.getBranch();
+  }
+
+  /**
* Create an instance of an interface with a constructor using a context.
*
* @param conf Configuration for the class names.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a43100ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
new file mode 100644
index 000..25a6466
--- /dev/null
+++ 

hadoop git commit: HDFS-12772. RBF: Federation Router State State Store internal API. Contributed by Inigo Goiri.

2018-01-23 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 92f789a13 -> e4ad5f7f3


HDFS-12772. RBF: Federation Router State State Store internal API. Contributed 
by Inigo Goiri.

(cherry picked from commit 95743c672e6b42b227a22dfa7cc16edc7bdb58bb)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e4ad5f7f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e4ad5f7f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e4ad5f7f

Branch: refs/heads/branch-3.0
Commit: e4ad5f7f37f4babdf716cead1d594ab1190db1c5
Parents: 92f789a
Author: Inigo Goiri 
Authored: Tue Jan 23 19:15:44 2018 -0800
Committer: Inigo Goiri 
Committed: Tue Jan 23 19:16:27 2018 -0800

--
 .../federation/router/FederationUtil.java   |  20 ++
 .../federation/router/RouterServiceState.java   |  31 +++
 .../server/federation/store/RouterStore.java|  78 +++
 .../federation/store/impl/RouterStoreImpl.java  |  93 +
 .../protocol/GetRouterRegistrationRequest.java  |  47 +
 .../protocol/GetRouterRegistrationResponse.java |  44 
 .../protocol/GetRouterRegistrationsRequest.java |  34 
 .../GetRouterRegistrationsResponse.java |  55 +
 .../store/protocol/RouterHeartbeatRequest.java  |  50 +
 .../store/protocol/RouterHeartbeatResponse.java |  49 +
 .../pb/GetRouterRegistrationRequestPBImpl.java  |  70 +++
 .../pb/GetRouterRegistrationResponsePBImpl.java |  79 
 .../pb/GetRouterRegistrationsRequestPBImpl.java |  61 ++
 .../GetRouterRegistrationsResponsePBImpl.java   | 102 ++
 .../impl/pb/RouterHeartbeatRequestPBImpl.java   |  78 +++
 .../impl/pb/RouterHeartbeatResponsePBImpl.java  |  70 +++
 .../federation/store/records/RouterState.java   | 169 
 .../store/records/StateStoreVersion.java|  91 +
 .../records/impl/pb/RouterStatePBImpl.java  | 202 +++
 .../impl/pb/StateStoreVersionPBImpl.java|  79 
 .../src/main/proto/FederationProtocol.proto |  44 
 .../store/driver/TestStateStoreDriverBase.java  |  13 ++
 .../store/records/TestRouterState.java  |  85 
 23 files changed, 1644 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4ad5f7f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
index 224cac1..3f736d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.util.VersionInfo;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -106,6 +107,25 @@ public final class FederationUtil {
   }
 
   /**
+   * Fetch the compile timestamp for this jar.
+   *
+   * @return Date compiled.
+   */
+  public static String getBuildVersion() {
+return VersionInfo.getBuildVersion();
+  }
+
+  /**
+   * Fetch the build/compile information for this jar.
+   *
+   * @return String Compilation info.
+   */
+  public static String getCompileInfo() {
+return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from "
++ VersionInfo.getBranch();
+  }
+
+  /**
* Create an instance of an interface with a constructor using a context.
*
* @param conf Configuration for the class names.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e4ad5f7f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
new file mode 100644
index 000..25a6466
--- /dev/null
+++ 

hadoop git commit: HDFS-12772. RBF: Federation Router State State Store internal API. Contributed by Inigo Goiri.

2018-01-23 Thread inigoiri
Repository: hadoop
Updated Branches:
  refs/heads/trunk d95c13774 -> 95743c672


HDFS-12772. RBF: Federation Router State State Store internal API. Contributed 
by Inigo Goiri.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95743c67
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95743c67
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95743c67

Branch: refs/heads/trunk
Commit: 95743c672e6b42b227a22dfa7cc16edc7bdb58bb
Parents: d95c137
Author: Inigo Goiri 
Authored: Tue Jan 23 19:15:44 2018 -0800
Committer: Inigo Goiri 
Committed: Tue Jan 23 19:15:44 2018 -0800

--
 .../federation/router/FederationUtil.java   |  20 ++
 .../federation/router/RouterServiceState.java   |  31 +++
 .../server/federation/store/RouterStore.java|  78 +++
 .../federation/store/impl/RouterStoreImpl.java  |  93 +
 .../protocol/GetRouterRegistrationRequest.java  |  47 +
 .../protocol/GetRouterRegistrationResponse.java |  44 
 .../protocol/GetRouterRegistrationsRequest.java |  34 
 .../GetRouterRegistrationsResponse.java |  55 +
 .../store/protocol/RouterHeartbeatRequest.java  |  50 +
 .../store/protocol/RouterHeartbeatResponse.java |  49 +
 .../pb/GetRouterRegistrationRequestPBImpl.java  |  70 +++
 .../pb/GetRouterRegistrationResponsePBImpl.java |  79 
 .../pb/GetRouterRegistrationsRequestPBImpl.java |  61 ++
 .../GetRouterRegistrationsResponsePBImpl.java   | 102 ++
 .../impl/pb/RouterHeartbeatRequestPBImpl.java   |  78 +++
 .../impl/pb/RouterHeartbeatResponsePBImpl.java  |  70 +++
 .../federation/store/records/RouterState.java   | 169 
 .../store/records/StateStoreVersion.java|  91 +
 .../records/impl/pb/RouterStatePBImpl.java  | 202 +++
 .../impl/pb/StateStoreVersionPBImpl.java|  79 
 .../src/main/proto/FederationProtocol.proto |  44 
 .../store/driver/TestStateStoreDriverBase.java  |  13 ++
 .../store/records/TestRouterState.java  |  85 
 23 files changed, 1644 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95743c67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
index 224cac1..3f736d4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/FederationUtil.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.ActiveNamenodeResolver;
 import 
org.apache.hadoop.hdfs.server.federation.resolver.FileSubclusterResolver;
 import org.apache.hadoop.hdfs.server.federation.store.StateStoreService;
+import org.apache.hadoop.util.VersionInfo;
 import org.codehaus.jettison.json.JSONArray;
 import org.codehaus.jettison.json.JSONException;
 import org.codehaus.jettison.json.JSONObject;
@@ -106,6 +107,25 @@ public final class FederationUtil {
   }
 
   /**
+   * Fetch the compile timestamp for this jar.
+   *
+   * @return Date compiled.
+   */
+  public static String getBuildVersion() {
+return VersionInfo.getBuildVersion();
+  }
+
+  /**
+   * Fetch the build/compile information for this jar.
+   *
+   * @return String Compilation info.
+   */
+  public static String getCompileInfo() {
+return VersionInfo.getDate() + " by " + VersionInfo.getUser() + " from "
++ VersionInfo.getBranch();
+  }
+
+  /**
* Create an instance of an interface with a constructor using a context.
*
* @param conf Configuration for the class names.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95743c67/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
new file mode 100644
index 000..25a6466
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterServiceState.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the 

hadoop git commit: HDFS-12963. Error log level in ShortCircuitRegistry#removeShm. Contributed by hu xiaodong.

2018-01-23 Thread yqlin
Repository: hadoop
Updated Branches:
  refs/heads/trunk e307edcb4 -> d95c13774


HDFS-12963. Error log level in ShortCircuitRegistry#removeShm. Contributed by 
hu xiaodong.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d95c1377
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d95c1377
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d95c1377

Branch: refs/heads/trunk
Commit: d95c13774e1bd5b3cc61bf4da8bae4a93ed0040c
Parents: e307edc
Author: Yiqun Lin 
Authored: Wed Jan 24 10:43:36 2018 +0800
Committer: Yiqun Lin 
Committed: Wed Jan 24 10:43:36 2018 +0800

--
 .../apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d95c1377/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
index edb64dd..ea9e72c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ShortCircuitRegistry.java
@@ -114,7 +114,7 @@ public class ShortCircuitRegistry {
 
   public synchronized void removeShm(ShortCircuitShm shm) {
 if (LOG.isTraceEnabled()) {
-  LOG.debug("removing shm " + shm);
+  LOG.trace("removing shm " + shm);
 }
 // Stop tracking the shmId.
 RegisteredShm removedShm = segments.remove(shm.getShmId());


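The one-line fix pairs the guard and the logging call at the same level. A
minimal illustration of the bug pattern, assuming an SLF4J-style logger (any
logger with isTraceEnabled()/trace()/debug() behaves the same way):

    // Before: the guard checks TRACE but the message is emitted at DEBUG, so
    // it carries the wrong level and is skipped entirely whenever the logger
    // sits at DEBUG (isTraceEnabled() is false there).
    if (LOG.isTraceEnabled()) {
      LOG.debug("removing shm " + shm);
    }

    // After: guard and call agree; with SLF4J's parameterized form the guard
    // can even be dropped, since the argument is only formatted when enabled.
    LOG.trace("removing shm {}", shm);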



[2/2] hadoop git commit: YARN-7540 and YARN-7605. Convert yarn app cli to call yarn api services and implement doAs for Api Service REST API. Contributed by Eric Yang and Jian He

2018-01-23 Thread billie
YARN-7540 and YARN-7605. Convert yarn app cli to call yarn api services and 
implement doAs for Api Service REST API. Contributed by Eric Yang and Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e307edcb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e307edcb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e307edcb

Branch: refs/heads/trunk
Commit: e307edcb472207a39d1cbe4be6f7fcddc7b4fd6d
Parents: 39b999a
Author: Billie Rinaldi 
Authored: Tue Jan 23 17:54:39 2018 -0800
Committer: Billie Rinaldi 
Committed: Tue Jan 23 17:54:39 2018 -0800

--
 .../org/apache/hadoop/http/HttpServer2.java |  39 ++
 .../hadoop-yarn-services-api/pom.xml|   9 +
 .../yarn/service/client/ApiServiceClient.java   | 450 +
 .../yarn/service/client/package-info.java   |  28 ++
 .../hadoop/yarn/service/webapp/ApiServer.java   | 487 ++-
 .../yarn/service/webapp/package-info.java   |  28 ++
 .../hadoop/yarn/service/TestApiServer.java  |  51 +-
 .../service/client/TestApiServiceClient.java| 259 ++
 .../src/test/resources/example-app.json |  15 +
 .../src/test/resources/log4j.properties |  19 +
 .../service/api/records/ReadinessCheck.java |   2 +
 .../yarn/service/api/records/Resource.java  |   1 -
 .../yarn/service/api/records/ServiceState.java  |   2 +-
 .../yarn/service/client/ServiceClient.java  |   4 +-
 .../hadoop/yarn/service/utils/JsonSerDeser.java |   3 +
 .../yarn/service/TestYarnNativeServices.java|  30 --
 .../client/TestBuildExternalComponents.java |   2 +-
 .../yarn/service/client/TestServiceCLI.java |  14 +-
 .../hadoop/yarn/client/api/AppAdminClient.java  |   6 +
 .../hadoop/yarn/client/cli/ApplicationCLI.java  |   3 +
 .../org/apache/hadoop/yarn/webapp/WebApps.java  |  37 +-
 .../registry/client/api/RegistryConstants.java  |   1 +
 .../client/impl/zk/RegistrySecurity.java|   9 +-
 .../server/resourcemanager/ResourceManager.java |  24 +-
 24 files changed, 1313 insertions(+), 210 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e307edcb/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
index fa447d8..65aadf3 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java
@@ -865,6 +865,45 @@ public final class HttpServer2 implements FilterContainer {
   }
 
   /**
+   * Add an internal servlet in the server, with initialization parameters.
+   * Note: This method is to be used for adding servlets that facilitate
+   * internal communication and not for user facing functionality. For
+   * servlets added using this method, filters (except internal Kerberos
+   * filters) are not enabled.
+   *
+   * @param name The name of the servlet (can be passed as null)
+   * @param pathSpec The path spec for the servlet
+   * @param clazz The servlet class
+   * @param params init parameters
+   */
+  public void addInternalServlet(String name, String pathSpec,
+      Class<? extends HttpServlet> clazz, Map<String, String> params) {
+// Jetty doesn't like the same path spec mapping to different servlets, so
+// if there's already a mapping for this pathSpec, remove it and assume 
that
+// the newest one is the one we want
+final ServletHolder sh = new ServletHolder(clazz);
+sh.setName(name);
+sh.setInitParameters(params);
+final ServletMapping[] servletMappings =
+webAppContext.getServletHandler().getServletMappings();
+for (int i = 0; i < servletMappings.length; i++) {
+  if (servletMappings[i].containsPathSpec(pathSpec)) {
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Found existing " + servletMappings[i].getServletName() +
+  " servlet at path " + pathSpec + "; will replace mapping" +
+  " with " + sh.getName() + " servlet");
+}
+ServletMapping[] newServletMappings =
+ArrayUtil.removeFromArray(servletMappings, servletMappings[i]);
+webAppContext.getServletHandler()
+.setServletMappings(newServletMappings);
+break;
+  }
+}
+webAppContext.addServlet(sh, pathSpec);
+  }
+
+  /**
* Add the given handler to the front of the list of handlers.
*
* @param handler The handler to add

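A hedged usage sketch of the addInternalServlet overload introduced above;
HelloServlet, the init parameter, and the pre-built HttpServer2 instance named
server are illustrative assumptions (java.util.HashMap/Map imports assumed):

    Map<String, String> initParams = new HashMap<>();
    initParams.put("greeting", "hello");   // hypothetical init parameter
    // Internal servlets skip the ordinary filter chain (only the internal
    // Kerberos filters apply), and any existing mapping at the same pathSpec
    // is replaced by this one.
    server.addInternalServlet("hello", "/hello/*", HelloServlet.class,
        initParams);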

[1/2] hadoop git commit: YARN-7540 and YARN-7605. Convert yarn app cli to call yarn api services and implement doAs for Api Service REST API. Contributed by Eric Yang and Jian He

2018-01-23 Thread billie
Repository: hadoop
Updated Branches:
  refs/heads/trunk 39b999aba -> e307edcb4


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e307edcb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
index d3ad53e..7364445 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
@@ -82,6 +82,7 @@ public class WebApps {
  public Class<? extends HttpServlet> clazz;
   public String name;
   public String spec;
+  public Map<String, String> params;
 }
 
 final String name;
@@ -147,7 +148,19 @@ public class WebApps {
   servlets.add(struct);
   return this;
 }
-
+
+public Builder withServlet(String name, String pathSpec,
+    Class<? extends HttpServlet> servlet,
+    Map<String, String> params) {
+  ServletStruct struct = new ServletStruct();
+  struct.clazz = servlet;
+  struct.name = name;
+  struct.spec = pathSpec;
+  struct.params = params;
+  servlets.add(struct);
+  return this;
+}
+
 public Builder with(Configuration conf) {
   this.conf = conf;
   return this;
@@ -243,6 +256,11 @@ public class WebApps {
   pathList.add("/" + wsName + "/*");
 }
   }
+  for (ServletStruct s : servlets) {
+if (!pathList.contains(s.spec)) {
+  pathList.add(s.spec);
+}
+  }
   if (conf == null) {
 conf = new Configuration();
   }
@@ -315,7 +333,12 @@ public class WebApps {
 HttpServer2 server = builder.build();
 
 for(ServletStruct struct: servlets) {
-  server.addServlet(struct.name, struct.spec, struct.clazz);
+  if (struct.params != null) {
+server.addInternalServlet(struct.name, struct.spec,
+struct.clazz, struct.params);
+  } else {
+server.addServlet(struct.name, struct.spec, struct.clazz);
+  }
 }
 for(Map.Entry<String, Object> entry : attributes.entrySet()) {
   server.setAttribute(entry.getKey(), entry.getValue());
@@ -394,22 +417,16 @@ public class WebApps {
 }
 
 public WebApp start(WebApp webapp) {
-  return start(webapp, null, null);
+  return start(webapp, null);
 }
 
-public WebApp start(WebApp webapp, WebAppContext ui2Context,
-    Map<String, String> services) {
+public WebApp start(WebApp webapp, WebAppContext ui2Context) {
   WebApp webApp = build(webapp);
   HttpServer2 httpServer = webApp.httpServer();
   if (ui2Context != null) {
 addFiltersForNewContext(ui2Context);
 httpServer.addHandlerAtFront(ui2Context);
   }
-  if (services!=null) {
-String packageName = services.get("PackageName");
-String pathSpec = services.get("PathSpec");
-httpServer.addJerseyResourcePackage(packageName, pathSpec);
-  }
   try {
 httpServer.start();
 LOG.info("Web app " + name + " started at "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e307edcb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
index 0006dfd..cfa2d65 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/api/RegistryConstants.java
@@ -221,6 +221,7 @@ public interface RegistryConstants {
* No authentication; client is anonymous.
*/
   String REGISTRY_CLIENT_AUTH_ANONYMOUS = "";
+  String REGISTRY_CLIENT_AUTH_SIMPLE = "simple";
 
   /**
* Registry client authentication ID.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e307edcb/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-registry/src/main/java/org/apache/hadoop/registry/client/impl/zk/RegistrySecurity.java
 

hadoop git commit: HDFS-13033: [SPS]: Implement a mechanism to do file block movements for external SPS. Contributed by Rakesh R.

2018-01-23 Thread umamahesh
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 72eb1d4c9 -> 0060d356e


HDFS-13033: [SPS]: Implement a mechanism to do file block movements for 
external SPS. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0060d356
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0060d356
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0060d356

Branch: refs/heads/HDFS-10285
Commit: 0060d356e072e5180e2a718e609ac47c99defb56
Parents: 72eb1d4
Author: Uma Maheswara Rao G 
Authored: Tue Jan 23 16:19:46 2018 -0800
Committer: Uma Maheswara Rao G 
Committed: Tue Jan 23 16:19:46 2018 -0800

--
 .../hdfs/server/balancer/NameNodeConnector.java |   8 +
 .../hdfs/server/common/sps/BlockDispatcher.java | 186 +
 .../sps/BlockMovementAttemptFinished.java   |  80 ++
 .../server/common/sps/BlockMovementStatus.java  |  53 
 .../common/sps/BlockStorageMovementTracker.java | 184 +
 .../sps/BlocksMovementsStatusHandler.java   |  95 +++
 .../hdfs/server/common/sps/package-info.java|  27 ++
 .../datanode/BlockStorageMovementTracker.java   | 186 -
 .../datanode/StoragePolicySatisfyWorker.java| 271 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |   4 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |   3 +-
 .../sps/BlockStorageMovementAttemptedItems.java |  12 +-
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   3 +-
 .../hdfs/server/namenode/sps/SPSService.java|  14 +-
 .../namenode/sps/StoragePolicySatisfier.java|  30 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java| 233 
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../sps/TestStoragePolicySatisfier.java |   6 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  69 -
 19 files changed, 997 insertions(+), 469 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0060d356/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index be59cce..ccb414a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -268,6 +268,14 @@ public class NameNodeConnector implements Closeable {
 }
   }
 
+  /**
+   * Returns fallbackToSimpleAuth. This will be true or false during calls to
+   * indicate if a secure client falls back to simple auth.
+   */
+  public AtomicBoolean getFallbackToSimpleAuth() {
+return fallbackToSimpleAuth;
+  }
+
   @Override
   public void close() {
 keyManager.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0060d356/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
new file mode 100644
index 000..f87fcae
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common.sps;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;

hadoop git commit: YARN-7782. Enable user re-mapping for Docker containers in yarn-default.xml. Contributed by Eric Yang

2018-01-23 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 ab482ebe7 -> a80c2dbab


YARN-7782. Enable user re-mapping for Docker containers in yarn-default.xml. 
Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a80c2dba
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a80c2dba
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a80c2dba

Branch: refs/heads/branch-2
Commit: a80c2dbab5ddfe71aa66f529629fa9964c7689b6
Parents: ab482eb
Author: Jian He 
Authored: Tue Jan 23 14:03:53 2018 -0800
Committer: Jian He 
Committed: Tue Jan 23 14:12:00 2018 -0800

--
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a80c2dba/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 905f4da..a975a5b 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1667,7 +1667,7 @@
   <property>
     <description>Property to enable docker user remapping</description>
     <name>yarn.nodemanager.runtime.linux.docker.enable-userremapping.allowed</name>
-    <value>false</value>
+    <value>true</value>
   </property>
 
   <property>





hadoop git commit: YARN-7782. Enable user re-mapping for Docker containers in yarn-default.xml. Contributed by Eric Yang

2018-01-23 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 93220f90e -> 6da08837d


YARN-7782. Enable user re-mapping for Docker containers in yarn-default.xml. 
Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6da08837
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6da08837
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6da08837

Branch: refs/heads/branch-2.9
Commit: 6da08837da9bb0f409626b41c9583d6d15da9a5b
Parents: 93220f9
Author: Jian He 
Authored: Tue Jan 23 14:03:53 2018 -0800
Committer: Jian He 
Committed: Tue Jan 23 14:07:41 2018 -0800

--
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6da08837/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 71dd72a..ce8c0f1 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1659,7 +1659,7 @@
   <property>
     <description>Property to enable docker user remapping</description>
     <name>yarn.nodemanager.runtime.linux.docker.enable-userremapping.allowed</name>
-    <value>false</value>
+    <value>true</value>
   </property>
 
   <property>





hadoop git commit: YARN-7782. Enable user re-mapping for Docker containers in yarn-default.xml. Contributed by Eric Yang

2018-01-23 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 1de071257 -> 92f789a13


YARN-7782. Enable user re-mapping for Docker containers in yarn-default.xml. 
Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/92f789a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/92f789a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/92f789a1

Branch: refs/heads/branch-3.0
Commit: 92f789a131d8ae5ee46302f3c88554e7314f516f
Parents: 1de0712
Author: Jian He 
Authored: Tue Jan 23 14:03:53 2018 -0800
Committer: Jian He 
Committed: Tue Jan 23 14:06:48 2018 -0800

--
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/92f789a1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 4e26be6..f3a9b09 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1656,7 +1656,7 @@
   <property>
     <description>Property to enable docker user remapping</description>
     <name>yarn.nodemanager.runtime.linux.docker.enable-userremapping.allowed</name>
-    <value>false</value>
+    <value>true</value>
   </property>
 
   <property>





hadoop git commit: YARN-7782. Enable user re-mapping for Docker containers in yarn-default.xml. Contributed by Eric Yang

2018-01-23 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk a72cdcc47 -> 39b999aba


YARN-7782. Enable user re-mapping for Docker containers in yarn-default.xml. 
Contributed by Eric Yang


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/39b999ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/39b999ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/39b999ab

Branch: refs/heads/trunk
Commit: 39b999aba2c1a1fc1df17bdf952834ba516f3b2a
Parents: a72cdcc
Author: Jian He 
Authored: Tue Jan 23 14:03:53 2018 -0800
Committer: Jian He 
Committed: Tue Jan 23 14:03:53 2018 -0800

--
 .../hadoop-yarn-common/src/main/resources/yarn-default.xml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/39b999ab/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index a9938c3..45aa635 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1688,7 +1688,7 @@
   <property>
     <description>Property to enable docker user remapping</description>
     <name>yarn.nodemanager.runtime.linux.docker.enable-userremapping.allowed</name>
-    <value>false</value>
+    <value>true</value>
   </property>
 
   <property>





hadoop git commit: HADOOP-15185. Update adls connector to use the current version of ADLS SDK. Contributed by Atul Sikaria.

2018-01-23 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/branch-3.0 304642322 -> 1de071257


HADOOP-15185. Update adls connector to use the current version of ADLS SDK.
Contributed by Atul Sikaria.

(cherry picked from commit f63d13f10db708228482b53883863c873c89f69c)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1de07125
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1de07125
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1de07125

Branch: refs/heads/branch-3.0
Commit: 1de0712578c8f558dc2fc358092411c5edf108c4
Parents: 3046423
Author: Steve Loughran 
Authored: Tue Jan 23 11:37:33 2018 -0800
Committer: Steve Loughran 
Committed: Tue Jan 23 11:37:33 2018 -0800

--
 hadoop-tools/hadoop-azure-datalake/pom.xml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1de07125/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 41dbe0b..9524292 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -102,7 +102,6 @@
 
   
 
-
   
 
   
@@ -110,7 +109,7 @@
     <dependency>
       <groupId>com.microsoft.azure</groupId>
       <artifactId>azure-data-lake-store-sdk</artifactId>
-      <version>2.2.3</version>
+      <version>2.2.5</version>
     </dependency>





hadoop git commit: YARN-7766. Introduce a new config property for YARN Service dependency tarball location. Contributed by Gour Saha

2018-01-23 Thread jianhe
Repository: hadoop
Updated Branches:
  refs/heads/trunk f63d13f10 -> a72cdcc47


YARN-7766. Introduce a new config property for YARN Service dependency tarball 
location. Contributed by Gour Saha


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a72cdcc4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a72cdcc4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a72cdcc4

Branch: refs/heads/trunk
Commit: a72cdcc47a1b3a4fcadbc9e12bd1bae2bbf04d83
Parents: f63d13f
Author: Jian He 
Authored: Tue Jan 23 10:53:27 2018 -0800
Committer: Jian He 
Committed: Tue Jan 23 10:53:27 2018 -0800

--
 .../yarn/service/client/ServiceClient.java  | 35 +++-
 .../yarn/service/conf/YarnServiceConf.java  |  6 
 .../tarball/TarballProviderService.java |  4 +--
 .../yarn/service/utils/CoreFileSystem.java  | 32 --
 .../hadoop/yarn/client/api/AppAdminClient.java  | 12 ---
 .../hadoop/yarn/client/cli/ApplicationCLI.java  | 13 +---
 .../hadoop/yarn/client/cli/TestYarnCLI.java |  5 ++-
 7 files changed, 68 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a72cdcc4/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index c224089..b8034b3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.api.records.*;
 import org.apache.hadoop.yarn.client.api.AppAdminClient;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.client.api.YarnClientApplication;
+import org.apache.hadoop.yarn.client.cli.ApplicationCLI;
 import org.apache.hadoop.yarn.client.util.YarnClientUtils;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -718,14 +719,20 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
 libPath, "lib", false);
 Path dependencyLibTarGzip = fs.getDependencyTarGzip();
 if (fs.isFile(dependencyLibTarGzip)) {
-  LOG.debug("Loading lib tar from " + fs.getFileSystem().getScheme() + ":/"
-  + dependencyLibTarGzip);
+  LOG.info("Loading lib tar from " + dependencyLibTarGzip);
   fs.submitTarGzipAndUpdate(localResources);
 } else {
+  if (dependencyLibTarGzip != null) {
+LOG.warn("Property {} has a value {}, but is not a valid file",
+YarnServiceConf.DEPENDENCY_TARBALL_PATH, dependencyLibTarGzip);
+  }
   String[] libs = ServiceUtils.getLibDirs();
-  LOG.info("Uploading all dependency jars to HDFS. For faster submission 
of" +
-  " apps, pre-upload dependency jars to HDFS "
-  + "using command: yarn app -enableFastLaunch");
+  LOG.info("Uploading all dependency jars to HDFS. For faster submission 
of"
+  + " apps, set config property {} to the dependency tarball location."
+  + " Dependency tarball can be uploaded to any HDFS path directly"
+  + " or by using command: yarn app -{} []",
+  YarnServiceConf.DEPENDENCY_TARBALL_PATH,
+  ApplicationCLI.ENABLE_FAST_LAUNCH);
   for (String libDirProp : libs) {
 ProviderUtils.addAllDependencyJars(localResources, fs, libPath, "lib",
 libDirProp);
@@ -988,16 +995,23 @@ public class ServiceClient extends AppAdminClient 
implements SliderExitCodes,
 return this.yarnClient;
   }
 
-  public int enableFastLaunch() throws IOException, YarnException {
-return actionDependency(true);
+  public int enableFastLaunch(String destinationFolder)
+  throws IOException, YarnException {
+return actionDependency(destinationFolder, true);
   }
 
-  public int actionDependency(boolean overwrite)
+  public int actionDependency(String destinationFolder, boolean overwrite)
   throws IOException, YarnException {
 String currentUser = 

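The diff is truncated by the archive at this point. As a rough illustration of the reshaped client surface above (driving ServiceClient this way, and the destination path, are assumptions for illustration, not part of the patch):

import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.service.client.ServiceClient;

public class FastLaunchSketch {
  public static void main(String[] args) throws Exception {
    ServiceClient client = new ServiceClient();
    client.init(new YarnConfiguration());
    client.start();
    int rc;
    try {
      // New in this change: the caller picks where the dependency tarball
      // is uploaded instead of always using the default location.
      rc = client.enableFastLaunch(
          "/yarn-services/framework/service-dep.tar.gz");
    } finally {
      client.stop();
    }
    System.exit(rc);
  }
}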
hadoop git commit: HDFS-12986. Ozone: Update ozone to latest ratis snapshot build (0.1.1-alpha-0f7169d-SNAPSHOT). Contributed by Lokesh Jain

2018-01-23 Thread szetszwo
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-7240 a6c2b6694 -> 65b90385f


HDFS-12986. Ozone: Update ozone to latest ratis snapshot build
(0.1.1-alpha-0f7169d-SNAPSHOT). Contributed by Lokesh Jain


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/65b90385
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/65b90385
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/65b90385

Branch: refs/heads/HDFS-7240
Commit: 65b90385fdf78aeef7067cd55b41177df3782a8b
Parents: a6c2b66
Author: Tsz-Wo Nicholas Sze 
Authored: Tue Jan 23 09:28:03 2018 -0800
Committer: Tsz-Wo Nicholas Sze 
Committed: Tue Jan 23 09:28:03 2018 -0800

--
 .../hadoop-client-runtime/pom.xml   |  1 +
 .../apache/hadoop/scm/XceiverClientRatis.java   | 39 
 .../server/ratis/XceiverServerRatis.java| 20 +-
 hadoop-project/pom.xml  |  2 +-
 4 files changed, 43 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/65b90385/hadoop-client-modules/hadoop-client-runtime/pom.xml
--
diff --git a/hadoop-client-modules/hadoop-client-runtime/pom.xml 
b/hadoop-client-modules/hadoop-client-runtime/pom.xml
index 7ed5ba7..5b101f4 100644
--- a/hadoop-client-modules/hadoop-client-runtime/pom.xml
+++ b/hadoop-client-modules/hadoop-client-runtime/pom.xml
@@ -157,6 +157,7 @@
               <artifactSet>
                 <excludes>
                   <exclude>com.google.code.findbugs:jsr305</exclude>
+                  <exclude>io.dropwizard.metrics:metrics-core</exclude>
                 </excludes>
               </artifactSet>
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/65b90385/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
index 12ee328..3bc70ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/XceiverClientRatis.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.scm;
 
 import com.google.common.base.Preconditions;
+import com.google.protobuf.InvalidProtocolBufferException;
 import org.apache.hadoop.conf.Configuration;
 import 
org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandRequestProto;
 import 
org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerCommandResponseProto;
@@ -42,6 +43,7 @@ import java.io.IOException;
 import java.util.List;
 import java.util.Objects;
 import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.CompletionException;
 import java.util.concurrent.ExecutionException;
 import java.util.concurrent.atomic.AtomicReference;
 
@@ -68,7 +70,9 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
  private final AtomicReference<RaftClient> client = new AtomicReference<>();
   private final int maxOutstandingRequests;
 
-  /** Constructs a client. */
+  /**
+   * Constructs a client.
+   */
   private XceiverClientRatis(Pipeline pipeline, RpcType rpcType,
   int maxOutStandingChunks) {
 super();
@@ -78,7 +82,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
   }
 
   /**
-   *  {@inheritDoc}
+   * {@inheritDoc}
*/
  public void createPipeline(String clusterId, List<DatanodeID> datanodes)
   throws IOException {
@@ -90,6 +94,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 
   /**
* Returns Ratis as pipeline Type.
+   *
* @return - Ratis
*/
   @Override
@@ -97,8 +102,7 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 return OzoneProtos.ReplicationType.RATIS;
   }
 
-  private void reinitialize(
-      List<DatanodeID> datanodes, RaftGroup group)
+  private void reinitialize(List<DatanodeID> datanodes, RaftGroup group)
   throws IOException {
 if (datanodes.isEmpty()) {
   return;
@@ -124,8 +128,9 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 
   /**
* Adds a new peers to the Ratis Ring.
+   *
* @param datanode - new datanode
-   * @param group - Raft group
+   * @param group- Raft group
* @throws IOException - on Failure.
*/
   private void reinitialize(DatanodeID datanode, RaftGroup group)
@@ -141,8 +146,6 @@ public final class XceiverClientRatis extends 
XceiverClientSpi {
 }
   }
 
-
-
   @Override
   public Pipeline getPipeline() {
 return pipeline;
@@ -216,6 +219,16 

hadoop git commit: HADOOP-15185. Update adls connector to use the current version of ADLS SDK. Contributed by Atul Sikaria.

2018-01-23 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/trunk 6347b2253 -> f63d13f10


HADOOP-15185. Update adls connector to use the current version of ADLS SDK.
Contributed by Atul Sikaria.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f63d13f1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f63d13f1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f63d13f1

Branch: refs/heads/trunk
Commit: f63d13f10db708228482b53883863c873c89f69c
Parents: 6347b22
Author: Steve Loughran 
Authored: Tue Jan 23 09:09:11 2018 -0800
Committer: Steve Loughran 
Committed: Tue Jan 23 09:09:11 2018 -0800

--
 hadoop-tools/hadoop-azure-datalake/pom.xml | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f63d13f1/hadoop-tools/hadoop-azure-datalake/pom.xml
--
diff --git a/hadoop-tools/hadoop-azure-datalake/pom.xml 
b/hadoop-tools/hadoop-azure-datalake/pom.xml
index 660bfb9..3ac84ff 100644
--- a/hadoop-tools/hadoop-azure-datalake/pom.xml
+++ b/hadoop-tools/hadoop-azure-datalake/pom.xml
@@ -102,7 +102,6 @@
 
   
 
-
   
 
   
@@ -110,7 +109,7 @@
     <dependency>
       <groupId>com.microsoft.azure</groupId>
       <artifactId>azure-data-lake-store-sdk</artifactId>
-      <version>2.2.3</version>
+      <version>2.2.5</version>
     </dependency>
 
     <dependency>





hadoop git commit: HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. Contributed by Uma Maheswara Rao G.

2018-01-23 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 0c41c0858 -> 72eb1d4c9


HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. 
Contributed by Uma Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/72eb1d4c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/72eb1d4c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/72eb1d4c

Branch: refs/heads/HDFS-10285
Commit: 72eb1d4c9d881dbe88b91ea18d664d956595a5cf
Parents: 0c41c08
Author: Rakesh Radhakrishnan 
Authored: Tue Jan 23 20:09:26 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Tue Jan 23 20:09:26 2018 +0530

--
 .../sps/BlockStorageMovementNeeded.java |  70 +++-
 .../hdfs/server/namenode/sps/Context.java   |   8 +
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   2 +
 .../namenode/sps/IntraSPSNameNodeContext.java   |   7 +
 .../sps/IntraSPSNameNodeFileIdCollector.java|   6 +-
 .../hdfs/server/namenode/sps/SPSService.java|  10 +-
 .../namenode/sps/StoragePolicySatisfier.java|   8 +-
 .../server/sps/ExternalSPSFileIDCollector.java  | 156 +
 .../hadoop/hdfs/server/sps/package-info.java|  28 ++
 .../sps/TestStoragePolicySatisfier.java | 323 ++-
 .../sps/TestExternalStoragePolicySatisfier.java | 108 +++
 11 files changed, 556 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/72eb1d4c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index 39a0051..b141502 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -97,23 +97,53 @@ public class BlockStorageMovementNeeded {
   }
 
   /**
-   * Add the itemInfo to tracking list for which storage movement
-   * expected if necessary.
+   * Add the itemInfo list to tracking list for which storage movement expected
+   * if necessary.
+   *
* @param startId
-   *- start id
+   *  - start id
* @param itemInfoList
-   *- List of child in the directory
+   *  - List of child in the directory
+   * @param scanCompleted
+   *  -Indicates whether the start id directory has no more elements to
+   *  scan.
*/
   @VisibleForTesting
-  public synchronized void addAll(long startId,
-      List<ItemInfo> itemInfoList, boolean scanCompleted) {
+  public synchronized void addAll(long startId, List<ItemInfo> itemInfoList,
+      boolean scanCompleted) {
 storageMovementNeeded.addAll(itemInfoList);
+updatePendingDirScanStats(startId, itemInfoList.size(), scanCompleted);
+  }
+
+  /**
+   * Add the itemInfo to tracking list for which storage movement expected if
+   * necessary.
+   *
+   * @param itemInfoList
+   *  - List of child in the directory
+   * @param scanCompleted
+   *  -Indicates whether the ItemInfo start id directory has no more
+   *  elements to scan.
+   */
+  @VisibleForTesting
+  public synchronized void add(ItemInfo itemInfo, boolean scanCompleted) {
+storageMovementNeeded.add(itemInfo);
+// This represents sps start id is file, so no need to update pending dir
+// stats.
+if (itemInfo.getStartId() == itemInfo.getFileId()) {
+  return;
+}
+updatePendingDirScanStats(itemInfo.getStartId(), 1, scanCompleted);
+  }
+
+  private void updatePendingDirScanStats(long startId, int numScannedFiles,
+  boolean scanCompleted) {
 DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
 if (pendingWork == null) {
   pendingWork = new DirPendingWorkInfo();
   pendingWorkForDirectory.put(startId, pendingWork);
 }
-pendingWork.addPendingWorkCount(itemInfoList.size());
+pendingWork.addPendingWorkCount(numScannedFiles);
 if (scanCompleted) {
   pendingWork.markScanCompleted();
 }
@@ -250,13 +280,15 @@ public class BlockStorageMovementNeeded {
 
 @Override
 public void run() {
-  LOG.info("Starting FileInodeIdCollector!.");
+  LOG.info("Starting SPSPathIdProcessor!.");
   long lastStatusCleanTime = 0;
+  Long startINodeId = null;
   while (ctxt.isRunning()) {
-LOG.info("Running FileInodeIdCollector!.");
 try {
 
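The remainder of this diff is cut off by the archive. The bookkeeping rule that the new add() method introduces is small enough to restate; below is a self-contained sketch with simplified stand-ins (the real ItemInfo and DirPendingWorkInfo classes carry more state):

import java.util.HashMap;
import java.util.Map;

public class PendingDirStatsSketch {
  // startId -> count of scanned child files still pending movement (simplified)
  private final Map<Long, Integer> pendingWorkForDirectory = new HashMap<>();

  void add(long startId, long fileId, boolean scanCompleted) {
    // When SPS was invoked directly on a file, startId == fileId and there is
    // no directory scan to account for, hence the early return above.
    if (startId == fileId) {
      return;
    }
    pendingWorkForDirectory.merge(startId, 1, Integer::sum);
    if (scanCompleted) {
      System.out.println("directory " + startId + ": scan complete");
    }
  }

  public static void main(String[] args) {
    PendingDirStatsSketch sketch = new PendingDirStatsSketch();
    sketch.add(5L, 5L, false); // plain file: no directory stats recorded
    sketch.add(2L, 7L, true);  // file 7 is a child of directory 2
    System.out.println(sketch.pendingWorkForDirectory); // prints {2=1}
  }
}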

hadoop git commit: YARN-7779. Display allocation tags in RM web UI and expose same through REST API. Contributed by Weiwei Yang.

2018-01-23 Thread sunilg
Repository: hadoop
Updated Branches:
  refs/heads/YARN-6592 dc32ecf76 -> 744046337


YARN-7779. Display allocation tags in RM web UI and expose same through REST 
API. Contributed by Weiwei Yang.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/74404633
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/74404633
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/74404633

Branch: refs/heads/YARN-6592
Commit: 7440463375ebf9a403903ce07c95239d64b0deac
Parents: dc32ecf
Author: Sunil G 
Authored: Tue Jan 23 17:09:58 2018 +0530
Committer: Sunil G 
Committed: Tue Jan 23 17:09:58 2018 +0530

--
 .../hadoop/yarn/sls/nodemanager/NodeInfo.java   |  6 ++
 .../yarn/sls/scheduler/RMNodeWrapper.java   |  6 ++
 .../server/resourcemanager/rmnode/RMNode.java   |  7 ++
 .../resourcemanager/rmnode/RMNodeImpl.java  |  6 ++
 .../constraint/AllocationTagsManager.java   | 11 +++
 .../resourcemanager/webapp/NodesPage.java   |  3 +
 .../webapp/dao/AllocationTagInfo.java   | 56 ++
 .../webapp/dao/AllocationTagsInfo.java  | 59 +++
 .../resourcemanager/webapp/dao/NodeInfo.java| 15 
 .../yarn/server/resourcemanager/MockNodes.java  |  6 ++
 .../resourcemanager/webapp/TestNodesPage.java   |  4 +-
 .../webapp/TestRMWebServicesNodes.java  | 77 +++-
 12 files changed, 253 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/74404633/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
index e71ddff..48e9991 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/nodemanager/NodeInfo.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.yarn.sls.nodemanager;
 
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 import org.apache.hadoop.classification.InterfaceAudience.Private;
@@ -220,6 +221,11 @@ public class NodeInfo {
 }
 
 @Override
+public Map<String, Long> getAllocationTagsWithCount() {
+  return null;
+}
+
+@Override
 public Resource getPhysicalResource() {
   return null;
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74404633/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
--
diff --git 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
index 6b7ac3c..52f0481 100644
--- 
a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
+++ 
b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/RMNodeWrapper.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode
 
 import java.util.Collections;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 
 @Private
@@ -209,6 +210,11 @@ public class RMNodeWrapper implements RMNode {
   }
 
   @Override
+  public Map<String, Long> getAllocationTagsWithCount() {
+return node.getAllocationTagsWithCount();
+  }
+
+  @Override
   public Resource getPhysicalResource() {
 return null;
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/74404633/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
index 328c040..ffc94c8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmnode/RMNode.java
@@ -20,6 +20,7 @@ package 

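The rest of this diff is truncated. The shape of the new data is simple, though: each node now reports its allocation tags together with a count, and the web UI and REST layer render that map. A hedged sketch of such a renderer (the actual NodesPage/AllocationTagsInfo code is not shown in this digest, so all names here are illustrative):

import java.util.Map;
import java.util.TreeMap;
import java.util.stream.Collectors;

public class AllocationTagsColumnSketch {
  static String render(Map<String, Long> tagsWithCount) {
    if (tagsWithCount == null || tagsWithCount.isEmpty()) {
      return "N/A";
    }
    // Produces e.g. "hbase-master(1), spark-executor(3)"
    return tagsWithCount.entrySet().stream()
        .map(e -> e.getKey() + "(" + e.getValue() + ")")
        .collect(Collectors.joining(", "));
  }

  public static void main(String[] args) {
    Map<String, Long> tags = new TreeMap<>();
    tags.put("hbase-master", 1L);
    tags.put("spark-executor", 3L);
    System.out.println(render(tags));
  }
}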
[33/50] [abbrv] hadoop git commit: YARN-7653. Node group support for AllocationTagsManager. (Panagiotis Garefalakis via asuresh)

2018-01-23 Thread sunilg
YARN-7653. Node group support for AllocationTagsManager. (Panagiotis 
Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/16ccca86
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/16ccca86
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/16ccca86

Branch: refs/heads/YARN-6592
Commit: 16ccca866252a5507f236dc62a7d1ba804711d87
Parents: c94ed6f
Author: Arun Suresh 
Authored: Fri Dec 22 07:24:37 2017 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:23 2018 +0530

--
 .../server/resourcemanager/ResourceManager.java |   2 +-
 .../constraint/AllocationTagsManager.java   | 282 ++-
 .../rmcontainer/TestRMContainerImpl.java|   2 +-
 .../constraint/TestAllocationTagsManager.java   | 269 --
 4 files changed, 392 insertions(+), 163 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ccca86/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
index e2e42f8..a256053 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceManager.java
@@ -494,7 +494,7 @@ public class ResourceManager extends CompositeService 
implements Recoverable {
   }
 
   protected AllocationTagsManager createAllocationTagsManager() {
-return new AllocationTagsManager();
+return new AllocationTagsManager(this.rmContext);
   }
   
   protected DelegationTokenRenewer createDelegationTokenRenewer() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/16ccca86/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index c278606..7b0b959 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.log4j.Logger;
 
 import java.util.HashMap;
@@ -38,9 +39,8 @@ import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.function.LongBinaryOperator;
 
 /**
- * Support storing maps between container-tags/applications and
- * nodes. This will be required by affinity/anti-affinity implementation and
- * cardinality.
+ * In-memory mapping between applications/container-tags and nodes/racks.
+ * Required by constrained affinity/anti-affinity and cardinality placement.
  */
 @InterfaceAudience.Private
 @InterfaceStability.Unstable
@@ -51,48 +51,54 @@ public class AllocationTagsManager {
 
   private ReentrantReadWriteLock.ReadLock readLock;
   private ReentrantReadWriteLock.WriteLock writeLock;
+  private final RMContext rmContext;
 
-  // Application's tags to node
-  private Map perAppMappings =
+  // Application's tags to Node
+  private Map perAppNodeMappings =
+  new HashMap<>();
+  // Application's tags to Rack
+  private Map

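The diff is cut off above, but the data-structure change is visible: the single per-application tag map is being split into node-scoped and rack-scoped maps. A standalone sketch of the underlying counting structure (types simplified to strings; the real class guards these maps with read/write locks and dedicated value types):

import java.util.HashMap;
import java.util.Map;

public class TagCountingSketch {
  // app -> node-or-rack -> tag -> cardinality (simplified stand-in)
  private final Map<String, Map<String, Map<String, Long>>> perAppMappings =
      new HashMap<>();

  void addContainerTag(String app, String nodeOrRack, String tag) {
    perAppMappings
        .computeIfAbsent(app, a -> new HashMap<>())
        .computeIfAbsent(nodeOrRack, n -> new HashMap<>())
        .merge(tag, 1L, Long::sum);
  }

  public static void main(String[] args) {
    TagCountingSketch sketch = new TagCountingSketch();
    sketch.addContainerTag("app_1", "rack-1", "hbase-rs");
    sketch.addContainerTag("app_1", "rack-1", "hbase-rs");
    // prints {app_1={rack-1={hbase-rs=2}}}: two "hbase-rs" containers on rack-1
    System.out.println(sketch.perAppMappings);
  }
}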
[43/50] [abbrv] hadoop git commit: YARN-7774. Miscellaneous fixes to the PlacementProcessor. (asuresh)

2018-01-23 Thread sunilg
YARN-7774. Miscellaneous fixes to the PlacementProcessor. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c61beb5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c61beb5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c61beb5

Branch: refs/heads/YARN-6592
Commit: 6c61beb54c25af84a527fca28ad18bc75abd1ae8
Parents: 3fa9d27
Author: Arun Suresh 
Authored: Thu Jan 18 11:01:36 2018 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:24 2018 +0530

--
 .../scheduler/SchedulerNode.java| 14 
 .../scheduler/capacity/CapacityScheduler.java   |  4 +
 .../constraint/PlacementConstraintsUtil.java|  5 +-
 .../constraint/algorithm/CircularIterator.java  | 86 
 .../algorithm/DefaultPlacementAlgorithm.java| 50 ++--
 .../constraint/processor/BatchedRequests.java   |  8 ++
 .../SingleConstraintAppPlacementAllocator.java  |  2 +-
 .../yarn/server/resourcemanager/MockAM.java |  4 +-
 .../constraint/TestPlacementProcessor.java  | 24 +++---
 .../algorithm/TestCircularIterator.java | 84 +++
 ...stSingleConstraintAppPlacementAllocator.java | 28 +++
 11 files changed, 270 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c61beb5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
index 05dbf1e..253eb24 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java
@@ -453,6 +453,20 @@ public abstract class SchedulerNode {
 return this.nodeUtilization;
   }
 
+  @Override
+  public boolean equals(Object o) {
+if (this == o) return true;
+if (!(o instanceof SchedulerNode)) return false;
+
+SchedulerNode that = (SchedulerNode) o;
+
+return getNodeID().equals(that.getNodeID());
+  }
+
+  @Override
+  public int hashCode() {
+return getNodeID().hashCode();
+  }
 
   private static class ContainerInfo {
 private final RMContainer container;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c61beb5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 8b39aa6..b1c7298 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2550,6 +2550,10 @@ public class CapacityScheduler extends
 " but only 1 will be attempted !!");
   }
   if (!appAttempt.isStopped()) {
+Resource resource =
+schedulingRequest.getResourceSizing().getResources();
+schedulingRequest.getResourceSizing().setResources(
+getNormalizedResource(resource));
 ResourceCommitRequest
 resourceCommitRequest = createResourceCommitRequest(
 appAttempt, schedulingRequest, schedulerNode);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c61beb5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
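One of the fixes above makes the CapacityScheduler normalize a SchedulingRequest's resources before committing the allocation. Normalization essentially rounds a request up to the scheduler's allocation increment; a toy sketch of that rounding (the real getNormalizedResource also enforces maximum allocation and handles all resource types):

public class NormalizeSketch {
  static long roundUpToIncrement(long requested, long increment) {
    return ((requested + increment - 1) / increment) * increment;
  }

  public static void main(String[] args) {
    // A 1500 MB ask against a 1024 MB increment is committed as 2048 MB.
    System.out.println(roundUpToIncrement(1500, 1024)); // prints 2048
  }
}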

[21/50] [abbrv] hadoop git commit: MAPREDUCE-6995. Uploader tool for Distributed Cache Deploy documentation (miklos.szeg...@cloudera.com via rkanter)

2018-01-23 Thread sunilg
MAPREDUCE-6995. Uploader tool for Distributed Cache Deploy documentation 
(miklos.szeg...@cloudera.com via rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/836643d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/836643d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/836643d7

Branch: refs/heads/YARN-6592
Commit: 836643d793c68bf1bee883abece84f024591da7c
Parents: 62c9e7f
Author: Robert Kanter 
Authored: Fri Jan 19 17:55:24 2018 -0800
Committer: Robert Kanter 
Committed: Fri Jan 19 17:57:54 2018 -0800

--
 .../site/markdown/DistributedCacheDeploy.md.vm  | 61 ++---
 .../src/site/markdown/MapredCommands.md | 19 ++
 .../mapred/uploader/FrameworkUploader.java  | 48 +
 .../mapred/uploader/TestFrameworkUploader.java  | 72 
 4 files changed, 178 insertions(+), 22 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/836643d7/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
index c69be1c..4552235 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/site/markdown/DistributedCacheDeploy.md.vm
@@ -12,10 +12,6 @@
   limitations under the License. See accompanying LICENSE file.
 -->
 
-#set ( $H3 = '###' )
-#set ( $H4 = '' )
-#set ( $H5 = '#' )
-
 Hadoop: Distributed Cache Deploy
 
 
@@ -55,23 +51,41 @@ Deploying a new MapReduce version consists of three steps:
 1.  Upload the MapReduce archive to a location that can be accessed by the
 job submission client. Ideally the archive should be on the cluster's 
default
 filesystem at a publicly-readable path. See the archive location discussion
-below for more details.
+below for more details. You can use the framework uploader tool to perform
+this step like
+`mapred frameworkuploader -target
+
hdfs:///mapred/framework/hadoop-mapreduce-${project.version}.tar#mrframework`.
+It will select the jar files that are in the classpath and put them into
+a tar archive specified by the -target and -fs options. The tool then 
returns
+a suggestion of how to set `mapreduce.application.framework.path` and
+`mapreduce.application.classpath`.
+
+`-fs`: The target file system. Defaults to the default filesystem set by
+`fs.defaultFS`.
+
+`-target` is the target location of the framework tarball, optionally 
followed
+ by a # with the localized alias. It then uploads the tar to the specified
+ directory. gzip is not needed since the jar files are already compressed.
+ Make sure the target directory is readable by all users but it is not
+ writable by others than administrators to protect cluster security.
 
 2.  Configure `mapreduce.application.framework.path` to point to the
 location where the archive is located. As when specifying distributed cache
 files for a job, this is a URL that also supports creating an alias for the
 archive if a URL fragment is specified. For example,
-
`hdfs:/mapred/framework/hadoop-mapreduce-${project.version}.tar.gz#mrframework`
+
`hdfs:///mapred/framework/hadoop-mapreduce-${project.version}.tar.gz#mrframework`
 will be localized as `mrframework` rather than
 `hadoop-mapreduce-${project.version}.tar.gz`.
 
 3.  Configure `mapreduce.application.classpath` to set the proper
-classpath to use with the MapReduce archive configured above. NOTE: An 
error
+classpath to use with the MapReduce archive configured above.
+If the `frameworkuploader` tool is used, it uploads all dependencies
+and returns the value that needs to be configured here. NOTE: An error
 occurs if `mapreduce.application.framework.path` is configured but
 `mapreduce.application.classpath` does not reference the base name of the
 archive path or the alias if an alias was specified.
 
-$H3 Location of the MapReduce Archive and How It Affects Job Performance
+### Location of the MapReduce Archive and How It Affects Job Performance
 
 Note that the location of the MapReduce archive can be critical to job 
submission and job startup performance. If the archive is not located on the 
cluster's default filesystem 
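The page is truncated here by the archive. For steps 2 and 3 above, the two properties can also be set programmatically; a hedged sketch follows (the version number and classpath entries are placeholders, use whatever the frameworkuploader tool actually prints for your cluster):

import org.apache.hadoop.conf.Configuration;

public class FrameworkPathSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Step 2: point at the uploaded archive; the #fragment sets the alias.
    conf.set("mapreduce.application.framework.path",
        "hdfs:///mapred/framework/hadoop-mapreduce-3.1.0.tar.gz#mrframework");
    // Step 3: the classpath must reference the alias chosen above.
    conf.set("mapreduce.application.classpath",
        "$PWD/mrframework/*:$PWD/mrframework/lib/*");
    System.out.println(conf.get("mapreduce.application.framework.path"));
  }
}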

[17/50] [abbrv] hadoop git commit: YARN-7537 [Atsv2] load hbase configuration from filesystem rather than URL. Contributed by Rohith Sharma

2018-01-23 Thread sunilg
YARN-7537 [Atsv2] load hbase configuration from filesystem rather than URL. 
Contributed by Rohith Sharma


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec8f47e7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec8f47e7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec8f47e7

Branch: refs/heads/YARN-6592
Commit: ec8f47e7fadbe62c0c39390d0a46cefd50e98492
Parents: cce71dc
Author: Vrushali C 
Authored: Fri Jan 19 15:34:40 2018 -0800
Committer: Vrushali C 
Committed: Fri Jan 19 15:34:40 2018 -0800

--
 .../src/main/resources/yarn-default.xml |  2 +-
 .../common/HBaseTimelineStorageUtils.java   | 40 ++--
 2 files changed, 29 insertions(+), 13 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec8f47e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 017799a..b83673f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -2455,7 +2455,7 @@
   </property>
 
   <property>
-    <description> Optional URL to an hbase-site.xml configuration file to be
+    <description> Optional FS path to an hbase-site.xml configuration file to be
     used to connect to the timeline-service hbase cluster. If empty or not
     specified, then the HBase configuration will be loaded from the classpath.
     When specified the values in the specified configuration file will override

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec8f47e7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
index c115b18..c25a0d2 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-timelineservice-hbase/src/main/java/org/apache/hadoop/yarn/server/timelineservice/storage/common/HBaseTimelineStorageUtils.java
@@ -18,13 +18,14 @@
 package org.apache.hadoop.yarn.server.timelineservice.storage.common;
 
 import java.io.IOException;
-import java.net.MalformedURLException;
-import java.net.URL;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HBaseConfiguration;
@@ -269,28 +270,43 @@ public final class HBaseTimelineStorageUtils {
* @return a configuration with the HBase configuration from the classpath,
* optionally overwritten by the timeline service configuration URL 
if
* specified.
-   * @throws MalformedURLException if a timeline service HBase configuration 
URL
-   *   is specified but is a malformed URL.
+   * @throws IOException if a timeline service HBase configuration path
+   *   is specified but unable to read it.
*/
   public static Configuration getTimelineServiceHBaseConf(Configuration conf)
-  throws MalformedURLException {
+  throws IOException {
 if (conf == null) {
   throw new NullPointerException();
 }
 
 Configuration hbaseConf;
-String timelineServiceHBaseConfFileURL =
+String timelineServiceHBaseConfFilePath =
 conf.get(YarnConfiguration.TIMELINE_SERVICE_HBASE_CONFIGURATION_FILE);
-if (timelineServiceHBaseConfFileURL != null
-&& timelineServiceHBaseConfFileURL.length() > 0) {
+if (timelineServiceHBaseConfFilePath != null
+&& timelineServiceHBaseConfFilePath.length() > 0) {
   LOG.info("Using hbase configuration at " +
-  
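The diff is truncated, but the visible part already shows the change of technique: the hbase-site.xml location is now opened through the Hadoop FileSystem API rather than java.net.URL, so any supported scheme resolves. A sketch of that core idea (the method shape is an assumption, not the committed code):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class HBaseConfFromFsSketch {
  static Configuration load(Configuration conf, String hbaseConfPath)
      throws IOException {
    Configuration hbaseConf = HBaseConfiguration.create(conf);
    Path path = new Path(hbaseConfPath);
    // hdfs://, file:/, s3a://, ... all resolve here; that is the point of
    // moving away from URL-only loading.
    FileSystem fs = path.getFileSystem(conf);
    try (FSDataInputStream in = fs.open(path)) {
      hbaseConf.addResource(in);
      hbaseConf.size(); // force the lazy parse while the stream is still open
    }
    return hbaseConf;
  }
}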

[11/50] [abbrv] hadoop git commit: YARN-7753. [UI2] Application logs has to be pulled from ATS 1.5 instead of ATS2. Contributed by Sunil G.

2018-01-23 Thread sunilg
YARN-7753. [UI2] Application logs has to be pulled from ATS 1.5 instead of 
ATS2. Contributed by Sunil G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c5bbd641
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c5bbd641
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c5bbd641

Branch: refs/heads/YARN-6592
Commit: c5bbd6418ed1a7b78bf5bd6c1e0fad1dc9fab300
Parents: 9e4f52d
Author: Rohith Sharma K S 
Authored: Fri Jan 19 15:48:16 2018 +0530
Committer: Rohith Sharma K S 
Committed: Fri Jan 19 15:48:16 2018 +0530

--
 .../src/main/webapp/app/adapters/yarn-log.js|  2 +-
 .../src/main/webapp/app/initializers/loader.js  | 41 +++-
 .../src/main/webapp/app/serializers/yarn-log.js |  6 +++
 .../src/main/webapp/app/services/hosts.js   |  4 ++
 .../src/main/webapp/config/default-config.js|  1 +
 5 files changed, 52 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5bbd641/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js
index df29b71..979ec79 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/adapters/yarn-log.js
@@ -19,7 +19,7 @@
 import AbstractAdapter from './abstract';
 
 export default AbstractAdapter.extend({
-  address: "timelineWebAddress",
+  address: "timelineV1WebAddress",
   // restNameSpace: "timelineV2", // Use ATSv2 when it supports log APIs.
   restNameSpace: "timeline", //Using ATSv1.5 now, would be supported by ATSv2 
very soon.
   serverName: "ATS",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5bbd641/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
index 9d63de3..58c6354 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/initializers/loader.js
@@ -25,7 +25,16 @@ function getTimeLineURL(rmhost) {
 (ENV.hosts.localBaseAddress? ENV.hosts.localBaseAddress + '/' : '') + 
rmhost;
 
   url += '/conf?name=yarn.timeline-service.reader.webapp.address';
-  Ember.Logger.log("Get Timeline Address URL: " + url);
+  Ember.Logger.log("Get Timeline V2 Address URL: " + url);
+  return url;
+}
+
+function getTimeLineV1URL(rmhost) {
+  var url = window.location.protocol + '//' +
+(ENV.hosts.localBaseAddress? ENV.hosts.localBaseAddress + '/' : '') + 
rmhost;
+
+  url += '/conf?name=yarn.timeline-service.webapp.address';
+  Ember.Logger.log("Get Timeline V1 Address URL: " + url);
   return url;
 }
 
@@ -70,6 +79,36 @@ function updateConfigs(application) {
 Ember.Logger.log("Timeline Address: " + ENV.hosts.timelineWebAddress);
 application.advanceReadiness();
   }
+
+  if(!ENV.hosts.timelineV1WebAddress) {
+var timelinehost = "";
+$.ajax({
+  type: 'GET',
+  dataType: 'json',
+  async: true,
+  context: this,
+  url: getTimeLineV1URL(rmhost),
+  success: function(data) {
+timelinehost = data.property.value;
+ENV.hosts.timelineV1WebAddress = timelinehost;
+
+var address = timelinehost.split(":")[0];
+var port = timelinehost.split(":")[1];
+
+Ember.Logger.log("Timeline V1 Address from RM: " + timelinehost);
+
+if(address === "0.0.0.0" || address === "localhost") {
+  var updatedAddress =  hostname + ":" + port;
+  ENV.hosts.timelineV1WebAddress = updatedAddress;
+  Ember.Logger.log("Timeline V1 Updated Address: " + updatedAddress);
+}
+application.advanceReadiness();
+  }
+});
+  } else {
+Ember.Logger.log("Timeline V1 Address: " + ENV.hosts.timelineV1WebAddress);
+application.advanceReadiness();
+  }
 }
 
 export function initialize( application ) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c5bbd641/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-log.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/serializers/yarn-log.js
 

[46/50] [abbrv] hadoop git commit: YARN-7745. Allow DistributedShell to take a placement specification for containers it wants to launch. (Arun Suresh via wangda)

2018-01-23 Thread sunilg
YARN-7745. Allow DistributedShell to take a placement specification for 
containers it wants to launch. (Arun Suresh via wangda)

Change-Id: Ided146d662e944a8a4692e5d6885f23fd9bbcad5


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fa9d27b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fa9d27b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fa9d27b

Branch: refs/heads/YARN-6592
Commit: 3fa9d27ba258e2e623457ea558ce296ce8754d7d
Parents: d422373
Author: Wangda Tan 
Authored: Thu Jan 18 14:22:45 2018 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:24 2018 +0530

--
 .../distributedshell/ApplicationMaster.java | 124 +++--
 .../applications/distributedshell/Client.java   |  14 ++
 .../distributedshell/PlacementSpec.java | 137 +++
 3 files changed, 263 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fa9d27b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
index bd810c1..d6d242c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java
@@ -42,6 +42,7 @@ import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.Arrays;
+import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.GnuParser;
@@ -87,8 +88,11 @@ import 
org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.NodeReport;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.ProfileCapability;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.api.records.ResourceSizing;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 import org.apache.hadoop.yarn.api.records.ExecutionType;
@@ -99,6 +103,7 @@ import 
org.apache.hadoop.yarn.api.records.timeline.TimelineEntity;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEntityGroupId;
 import org.apache.hadoop.yarn.api.records.timeline.TimelineEvent;
 import org.apache.hadoop.yarn.api.records.timeline.TimelinePutResponse;
+import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.TimelineClient;
 import org.apache.hadoop.yarn.client.api.TimelineV2Client;
@@ -274,6 +279,10 @@ public class ApplicationMaster {
   @VisibleForTesting
   protected AtomicInteger numRequestedContainers = new AtomicInteger();
 
+  protected AtomicInteger numIgnore = new AtomicInteger();
+
+  protected AtomicInteger totalRetries = new AtomicInteger(10);
+
   // Shell command to be executed
   private String shellCommand = "";
   // Args to be passed to the shell command
@@ -289,6 +298,9 @@ public class ApplicationMaster {
   // File length needed for local resource
   private long shellScriptPathLen = 0;
 
+  // Placement Specifications
+  private Map<String, PlacementSpec> placementSpecs = null;
+
   // Container retry options
   private ContainerRetryPolicy containerRetryPolicy =
   ContainerRetryPolicy.NEVER_RETRY;
@@ -334,6 +346,7 @@ public class ApplicationMaster {
   private final String windows_command = "cmd /c";
 
   private int yarnShellIdCounter = 1;
+  private final AtomicLong allocIdCounter = new AtomicLong(1);
 
   @VisibleForTesting
  protected final Set<ContainerId> launchedContainers =
@@ -457,6 +470,7 @@ public class ApplicationMaster {
 "If container could retry, it specifies max 

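The email is truncated here. To give a feel for what a per-container placement specification ultimately turns into, here is a hedged sketch built on the SchedulingRequest and PlacementConstraints APIs as they later stabilized; treat the exact builder methods as assumptions at this point in the branch:

import java.util.Collections;

import org.apache.hadoop.yarn.api.records.ExecutionType;
import org.apache.hadoop.yarn.api.records.ExecutionTypeRequest;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceSizing;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
import org.apache.hadoop.yarn.api.resource.PlacementConstraints;

public class PlacementSpecSketch {
  static SchedulingRequest zkAntiAffinity(long allocationRequestId) {
    // "No two containers tagged zk on the same node."
    PlacementConstraint antiAffinity = PlacementConstraints.build(
        PlacementConstraints.targetNotIn(PlacementConstraints.NODE,
            PlacementConstraints.PlacementTargets.allocationTag("zk")));
    return SchedulingRequest.newBuilder()
        .allocationRequestId(allocationRequestId)
        .priority(Priority.newInstance(0))
        .executionType(ExecutionTypeRequest.newInstance(ExecutionType.GUARANTEED))
        .allocationTags(Collections.singleton("zk"))
        .resourceSizing(
            ResourceSizing.newInstance(1, Resource.newInstance(1024, 1)))
        .placementConstraintExpression(antiAffinity)
        .build();
  }
}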
[50/50] [abbrv] hadoop git commit: YARN-7763. Allow Constraints specified in the SchedulingRequest to override application level constraints. (Weiwei Yang via asuresh)

2018-01-23 Thread sunilg
YARN-7763. Allow Constraints specified in the SchedulingRequest to override 
application level constraints. (Weiwei Yang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/743b0b32
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/743b0b32
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/743b0b32

Branch: refs/heads/YARN-6592
Commit: 743b0b32440a853b536a4a5b8cd69a4fd36ca072
Parents: 6c61beb
Author: Arun Suresh 
Authored: Sun Jan 21 19:11:17 2018 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:24 2018 +0530

--
 .../scheduler/capacity/CapacityScheduler.java   |  4 +-
 .../constraint/PlacementConstraintsUtil.java| 98 +++-
 .../algorithm/DefaultPlacementAlgorithm.java|  4 +-
 .../SingleConstraintAppPlacementAllocator.java  | 10 +-
 .../TestPlacementConstraintsUtil.java   | 94 ---
 5 files changed, 123 insertions(+), 87 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/743b0b32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index b1c7298..8f25646 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -2561,9 +2561,9 @@ public class CapacityScheduler extends
 // Validate placement constraint is satisfied before
 // committing the request.
 try {
-  if (!PlacementConstraintsUtil.canSatisfySingleConstraint(
+  if (!PlacementConstraintsUtil.canSatisfyConstraints(
   appAttempt.getApplicationId(),
-  schedulingRequest.getAllocationTags(), schedulerNode,
+  schedulingRequest, schedulerNode,
   rmContext.getPlacementConstraintManager(),
   rmContext.getAllocationTagsManager())) {
 LOG.debug("Failed to allocate container for application "

http://git-wip-us.apache.org/repos/asf/hadoop/blob/743b0b32/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
index ff5cb67..c07c16f 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/PlacementConstraintsUtil.java
@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience.Public;
 import org.apache.hadoop.classification.InterfaceStability.Unstable;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
 import org.apache.hadoop.yarn.api.resource.PlacementConstraint;
 import 
org.apache.hadoop.yarn.api.resource.PlacementConstraint.AbstractConstraint;
 import 
org.apache.hadoop.yarn.api.resource.PlacementConstraint.SingleConstraint;
@@ -54,7 +55,7 @@ public final class PlacementConstraintsUtil {
   }
 
   /**
-   * Returns true if **single** placement constraint with associated
+   * Returns true if single placement constraint with associated
* allocationTags and scope is satisfied by a specific scheduler Node.
*
* 

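The diff is truncated above; the essence of the change is a precedence rule between two sources of constraints. A one-method sketch (the helper name is hypothetical): a constraint carried by the SchedulingRequest itself wins, otherwise the application-level constraint registered with the PlacementConstraintManager applies.

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;

public class ConstraintPrecedenceSketch {
  static PlacementConstraint effectiveConstraint(
      PlacementConstraint requestLevel, PlacementConstraint appLevel) {
    // Request-level overrides application-level; fall back otherwise.
    return requestLevel != null ? requestLevel : appLevel;
  }
}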
[02/50] [abbrv] hadoop git commit: HADOOP-15150. In FsShell, UGI params should be overridden through env vars (-D arg). Contributed by Brahma Reddy Battula.

2018-01-23 Thread sunilg
HADOOP-15150. In FsShell, UGI params should be overridden through env vars
(-D arg). Contributed by Brahma Reddy Battula.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/08332e12
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/08332e12
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/08332e12

Branch: refs/heads/YARN-6592
Commit: 08332e12d055d85472f0c9371fefe9b56bfea1ed
Parents: cdaf92c
Author: Brahma Reddy Battula 
Authored: Thu Jan 18 10:54:32 2018 +0530
Committer: Brahma Reddy Battula 
Committed: Thu Jan 18 10:54:32 2018 +0530

--
 .../src/main/java/org/apache/hadoop/fs/FsShell.java   |  2 ++
 .../java/org/apache/hadoop/fs/TestFsShellList.java| 14 ++
 2 files changed, 16 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/08332e12/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
index 721f4df..94d3389 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FsShell.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFactory;
 import org.apache.hadoop.fs.shell.FsCommand;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.tools.TableListing;
 import org.apache.hadoop.tracing.TraceUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -99,6 +100,7 @@ public class FsShell extends Configured implements Tool {
   
   protected void init() throws IOException {
 getConf().setQuietMode(true);
+UserGroupInformation.setConfiguration(getConf());
 if (commandFactory == null) {
   commandFactory = new CommandFactory(getConf());
   commandFactory.addObject(new Help(), "-help");

http://git-wip-us.apache.org/repos/asf/hadoop/blob/08332e12/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
index 03720d3..c780f41 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFsShellList.java
@@ -75,4 +75,18 @@ public class TestFsShellList {
 lsArgv = new String[]{"-ls", "-q", testRootDir.toString()};
 assertThat(shell.run(lsArgv), is(0));
   }
+
+  /*
+  UGI params should take effect when we pass.
+ */
+  @Test(expected = IllegalArgumentException.class)
+  public void testListWithUGI() throws Exception {
+FsShell fsShell = new FsShell(new Configuration());
+//Passing a dummy value so that it should throw an IAE
+fsShell.getConf()
+.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
+"DUMMYAUTH");
+String[] lsArgv = new String[] {"-ls", testRootDir.toString()};
+fsShell.run(lsArgv);
+  }
 }
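With init() now pushing the merged configuration into UserGroupInformation, a -D override on the command line actually reaches the security layer. A minimal sketch of exercising that path through ToolRunner (the property value is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;
import org.apache.hadoop.util.ToolRunner;

public class FsShellUgiOverrideSketch {
  public static void main(String[] args) throws Exception {
    // Equivalent of: hadoop fs -D hadoop.security.authentication=kerberos -ls /
    // ToolRunner folds the -D option into the conf that FsShell.init() now
    // hands to UserGroupInformation before any command runs.
    int rc = ToolRunner.run(new Configuration(), new FsShell(),
        new String[] {"-D", "hadoop.security.authentication=kerberos",
            "-ls", "/"});
    System.exit(rc);
  }
}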





[06/50] [abbrv] hadoop git commit: YARN-7750. [UI2] Render time related fields in all pages to the browser timezone. Contributed by Vasudevan Skm.

2018-01-23 Thread sunilg
YARN-7750. [UI2] Render time related fields in all pages to the browser 
timezone. Contributed by Vasudevan Skm.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8e5472b1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8e5472b1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8e5472b1

Branch: refs/heads/YARN-6592
Commit: 8e5472b1e63f1c50e253e64702468da2bb38e476
Parents: 1093a73
Author: Sunil G 
Authored: Thu Jan 18 18:49:19 2018 +0530
Committer: Sunil G 
Committed: Thu Jan 18 18:54:41 2018 +0530

--
 .../src/main/webapp/app/utils/converter.js  |  7 +++--
 .../src/main/webapp/app/utils/date-utils.js | 27 
 .../src/main/webapp/ember-cli-build.js  |  1 +
 3 files changed, 31 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e5472b1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
index 74cc916..d7de4e7 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/converter.js
@@ -17,6 +17,7 @@
  */
 
 import Constants from 'yarn-ui/constants';
+import { convertTimestampWithTz } from "./date-utils";
 
 export default {
   containerIdToAttemptId: function(containerId) {
@@ -82,12 +83,10 @@ export default {
 return total * 1000;
   },
   timeStampToDate: function(timeStamp) {
-var dateTimeString = moment(parseInt(timeStamp)).format("YYYY/MM/DD 
HH:mm:ss");
-return dateTimeString;
+return convertTimestampWithTz(timeStamp, "YYYY/MM/DD HH:mm:ss");
   },
   timeStampToDateOnly: function(timeStamp) {
-var dateTimeString = moment(parseInt(timeStamp)).format("YYYY/MM/DD");
-return dateTimeString;
+return convertTimestampWithTz(timeStamp, "YYYY/MM/DD");
   },
   dateToTimeStamp: function(date) {
 if (date) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e5472b1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js
new file mode 100644
index 000..83dd200
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/app/utils/date-utils.js
@@ -0,0 +1,27 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+const defaultTz = "America/Los_Angeles";
+
+const getDefaultTimezone = () => {
+  return moment.tz.guess() || defaultTz;
+};
+
+export const convertTimestampWithTz = (timestamp, format = "YYYY/MM/DD") =>
+  moment.tz(timestamp, getDefaultTimezone()).format(format);
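For JVM-side readers, the same guess-then-fallback-then-format pattern sketched with java.time — an illustrative analogue, not code from this patch:

import java.time.Instant;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

public class TzFormatSketch {
  private static final ZoneId DEFAULT_TZ = ZoneId.of("America/Los_Angeles");

  // Analogue of convertTimestampWithTz: resolve the local zone (falling back
  // to a fixed default) and render the epoch-millis timestamp in it.
  static String convertTimestampWithTz(long epochMillis, String pattern) {
    ZoneId zone;
    try {
      zone = ZoneId.systemDefault();      // ~ moment.tz.guess()
    } catch (Exception e) {
      zone = DEFAULT_TZ;                  // ~ the defaultTz fallback
    }
    return Instant.ofEpochMilli(epochMillis).atZone(zone)
        .format(DateTimeFormatter.ofPattern(pattern));
  }

  public static void main(String[] args) {
    System.out.println(
        convertTimestampWithTz(System.currentTimeMillis(), "yyyy/MM/dd HH:mm:ss"));
  }
}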

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8e5472b1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
index 6af45fc..db09ae3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/ember-cli-build.js
@@ -42,6 +42,7 @@ module.exports = function(defaults) {
   
app.import("bower_components/datatables/media/css/jquery.dataTables.min.css");
   app.import("bower_components/datatables/media/js/jquery.dataTables.min.js");
   app.import("bower_components/momentjs/min/moment.min.js");
+  

[25/50] [abbrv] hadoop git commit: HADOOP-15121. Encounter NullPointerException when using DecayRpcScheduler. Contributed by Tao Jie.

2018-01-23 Thread sunilg
HADOOP-15121. Encounter NullPointerException when using DecayRpcScheduler. 
Contributed by Tao Jie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3fde0f1d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3fde0f1d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3fde0f1d

Branch: refs/heads/YARN-6592
Commit: 3fde0f1db599227773c0cd537b33312d368ad4d9
Parents: 97fe3cc
Author: Hanisha Koneru 
Authored: Mon Jan 22 15:54:44 2018 -0800
Committer: Hanisha Koneru 
Committed: Mon Jan 22 15:54:44 2018 -0800

--
 .../apache/hadoop/ipc/DecayRpcScheduler.java| 15 ++---
 .../hadoop/ipc/TestDecayRpcScheduler.java   | 32 ++--
 2 files changed, 39 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fde0f1d/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
index a847d11..0a00ca7 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/DecayRpcScheduler.java
@@ -236,8 +236,8 @@ public class DecayRpcScheduler implements RpcScheduler,
 DecayTask task = new DecayTask(this, timer);
 timer.scheduleAtFixedRate(task, decayPeriodMillis, decayPeriodMillis);
 
-metricsProxy = MetricsProxy.getInstance(ns, numLevels);
-metricsProxy.setDelegate(this);
+metricsProxy = MetricsProxy.getInstance(ns, numLevels, this);
+recomputeScheduleCache();
   }
 
   // Load configs
@@ -680,21 +680,26 @@ public class DecayRpcScheduler implements RpcScheduler,
 private long[] callCountInLastWindowDefault;
 private ObjectName decayRpcSchedulerInfoBeanName;
 
-private MetricsProxy(String namespace, int numLevels) {
+private MetricsProxy(String namespace, int numLevels,
+DecayRpcScheduler drs) {
   averageResponseTimeDefault = new double[numLevels];
   callCountInLastWindowDefault = new long[numLevels];
+  setDelegate(drs);
   decayRpcSchedulerInfoBeanName =
   MBeans.register(namespace, "DecayRpcScheduler", this);
   this.registerMetrics2Source(namespace);
 }
 
 public static synchronized MetricsProxy getInstance(String namespace,
-int numLevels) {
+int numLevels, DecayRpcScheduler drs) {
   MetricsProxy mp = INSTANCES.get(namespace);
   if (mp == null) {
 // We must create one
-mp = new MetricsProxy(namespace, numLevels);
+mp = new MetricsProxy(namespace, numLevels, drs);
 INSTANCES.put(namespace, mp);
+  } else if (drs != mp.delegate.get()) {
+// in case the delegate has been reclaimed, set it again
+mp.setDelegate(drs);
   }
   return mp;
 }
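The fix hinges on MetricsProxy holding its scheduler through a WeakReference, so a cached proxy can outlive the scheduler it reports on. A stripped-down sketch of the pattern (simplified names, not the actual Hadoop classes):

import java.lang.ref.WeakReference;
import java.util.HashMap;
import java.util.Map;

final class ProxySketch {
  private static final Map<String, ProxySketch> INSTANCES = new HashMap<>();
  private WeakReference<Object> delegate;

  private ProxySketch(Object d) {
    setDelegate(d);
  }

  void setDelegate(Object d) {
    this.delegate = new WeakReference<>(d);
  }

  static synchronized ProxySketch getInstance(String namespace, Object d) {
    ProxySketch mp = INSTANCES.get(namespace);
    if (mp == null) {
      mp = new ProxySketch(d);
      INSTANCES.put(namespace, mp);
    } else if (d != mp.delegate.get()) {
      // The cached proxy may point at a collected or stale delegate; refresh
      // it so metrics never dereference a dead scheduler (the NPE source).
      mp.setDelegate(d);
    }
    return mp;
  }
}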

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3fde0f1d/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java
index 58380c5..10ab40a 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestDecayRpcScheduler.java
@@ -19,19 +19,22 @@
 package org.apache.hadoop.ipc;
 
 import static java.lang.Thread.sleep;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
 
 import org.junit.Test;
+
+import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
+import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.conf.Configuration;
 
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
+
+import java.io.ByteArrayOutputStream;
+import java.io.PrintStream;
 import java.lang.management.ManagementFactory;
 
 public class TestDecayRpcScheduler {
@@ -248,4 +251,27 @@ public class TestDecayRpcScheduler {
   sleep(10);
 }
   }
+
+  @Test(timeout=6)
+  public void testNPEatInitialization() throws InterruptedException {
+// redirect the LOG to and check if 

[45/50] [abbrv] hadoop git commit: YARN-7709. Remove SELF from TargetExpression type. (Konstantinos Karanasos via asuresh)

2018-01-23 Thread sunilg
YARN-7709. Remove SELF from TargetExpression type. (Konstantinos Karanasos via 
asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bf3aece5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bf3aece5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bf3aece5

Branch: refs/heads/YARN-6592
Commit: bf3aece5d9b1caacf02e826050c71989b2d215a7
Parents: fef39e7
Author: Arun Suresh 
Authored: Thu Jan 18 04:29:57 2018 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:24 2018 +0530

--
 .../yarn/api/resource/PlacementConstraint.java  | 32 ++
 .../yarn/api/resource/PlacementConstraints.java | 35 +---
 .../api/resource/TestPlacementConstraints.java  |  3 +-
 .../PlacementConstraintTransformations.java | 19 +++
 .../TestPlacementConstraintTransformations.java | 35 +---
 .../constraint/PlacementConstraintsUtil.java| 10 --
 6 files changed, 64 insertions(+), 70 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3aece5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
index b6e851a..4d998ac 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -242,7 +242,7 @@ public class PlacementConstraint {
  * Enum specifying the type of the target expression.
  */
 public enum TargetType {
-  NODE_ATTRIBUTE, ALLOCATION_TAG, SELF
+  NODE_ATTRIBUTE, ALLOCATION_TAG
 }
 
 private TargetType targetType;
@@ -418,23 +418,25 @@ public class PlacementConstraint {
   }
 
   /**
-   * Class that represents a cardinality constraint. Such a constraint the
-   * number of allocations within a given scope to some minimum and maximum
-   * values.
+   * Class that represents a cardinality constraint. Such a constraint allows
+   * the number of allocations with a specific set of tags and within a given
+   * scope to be between some minimum and maximum values.
*
* It is a specialized version of the {@link SingleConstraint}, where the
-   * target is self (i.e., the allocation to which the constraint is attached).
+   * target is a set of allocation tags.
*/
   public static class CardinalityConstraint extends AbstractConstraint {
 private String scope;
 private int minCardinality;
 private int maxCardinality;
+private Set<String> allocationTags;
 
 public CardinalityConstraint(String scope, int minCardinality,
-int maxCardinality) {
+int maxCardinality, Set<String> allocationTags) {
   this.scope = scope;
   this.minCardinality = minCardinality;
   this.maxCardinality = maxCardinality;
+  this.allocationTags = allocationTags;
 }
 
 /**
@@ -464,11 +466,21 @@ public class PlacementConstraint {
   return maxCardinality;
 }
 
+/**
+ * Get the allocation tags of the constraint.
+ *
+ * @return the allocation tags of the constraint
+ */
+public Set<String> getAllocationTags() {
+  return allocationTags;
+}
+
 @Override
 public <T> T accept(Visitor<T> visitor) {
   return visitor.visit(this);
 }
 
+
 @Override
 public boolean equals(Object o) {
   if (this == o) {
@@ -486,7 +498,11 @@ public class PlacementConstraint {
   if (maxCardinality != that.maxCardinality) {
 return false;
   }
-  return scope != null ? scope.equals(that.scope) : that.scope == null;
+  if (scope != null ? !scope.equals(that.scope) : that.scope != null) {
+return false;
+  }
+  return allocationTags != null ? 
allocationTags.equals(that.allocationTags)
+  : that.allocationTags == null;
 }
 
 @Override
@@ -494,6 +510,8 @@ public class PlacementConstraint {
   int result = scope != null ? scope.hashCode() : 0;
   result = 31 * result + minCardinality;
   result = 31 * result + maxCardinality;
+  result = 31 * result
+  + (allocationTags != null ? allocationTags.hashCode() : 0);
   return result;
 }
   }
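With allocation tags now part of the constraint, a cardinality constraint is expressed against a tag set rather than "self". A minimal sketch using the PlacementConstraints builders — assuming hadoop-yarn-api from this branch on the classpath:

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.cardinality;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;

public class CardinalityDemo {
  public static void main(String[] args) {
    // Keep between 1 and 3 containers tagged "hbase-rs" on any single node.
    PlacementConstraint pc = cardinality(NODE, 1, 3, "hbase-rs").build();
    System.out.println(pc);
  }
}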

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bf3aece5/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraints.java

[31/50] [abbrv] hadoop git commit: YARN-7669. API and interface modifications for placement constraint processor. (asuresh)

2018-01-23 Thread sunilg
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c94ed6f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
new file mode 100644
index 000..9571f0e
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutput.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.api;
+
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.SchedulingRequest;
+
+import java.util.ArrayList;
+import java.util.List;
+
+/**
+ * Encapsulates the output of the ConstraintPlacementAlgorithm. The Algorithm
+ * is free to produce multiple output objects at the end of each run and it
+ * must use the provided ConstraintPlacementAlgorithmOutputCollector to
+ * aggregate/collect this output, similar to the MapReduce Mapper/Reducer,
+ * which is provided a collector for its output.
+ */
+public class ConstraintPlacementAlgorithmOutput {
+
+  private final ApplicationId applicationId;
+
+  public ConstraintPlacementAlgorithmOutput(ApplicationId applicationId) {
+this.applicationId = applicationId;
+  }
+
+  private final List<PlacedSchedulingRequest> placedRequests =
+  new ArrayList<>();
+
+  private final List<SchedulingRequest> rejectedRequests =
+  new ArrayList<>();
+
+  public List<PlacedSchedulingRequest> getPlacedRequests() {
+return placedRequests;
+  }
+
+  public List<SchedulingRequest> getRejectedRequests() {
+return rejectedRequests;
+  }
+
+  public ApplicationId getApplicationId() {
+return applicationId;
+  }
+}
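A toy, self-contained model of the output/collector contract described in the javadoc above (the Request, AlgorithmOutput, and OutputCollector types are simplified stand-ins, not the YARN classes): the algorithm fills one output object per run and hands it to the collector, MapReduce-style.

import java.util.ArrayList;
import java.util.List;

class Request {                      // stand-in for SchedulingRequest
  final String name;
  Request(String name) { this.name = name; }
}

class AlgorithmOutput {              // stand-in for ConstraintPlacementAlgorithmOutput
  final String appId;
  final List<Request> placed = new ArrayList<>();
  final List<Request> rejected = new ArrayList<>();
  AlgorithmOutput(String appId) { this.appId = appId; }
}

interface OutputCollector {          // stand-in for the output collector
  void collect(AlgorithmOutput out);
}

public class CollectorDemo {
  public static void main(String[] args) {
    List<AlgorithmOutput> aggregated = new ArrayList<>();
    OutputCollector collector = aggregated::add;

    AlgorithmOutput out = new AlgorithmOutput("app_1");
    out.placed.add(new Request("r1"));     // the algorithm placed r1...
    out.rejected.add(new Request("r2"));   // ...and could not place r2
    collector.collect(out);                // emit one run's result

    System.out.println(aggregated.size() + " output(s) collected");
  }
}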

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c94ed6f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
new file mode 100644
index 000..131fd42
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/api/ConstraintPlacementAlgorithmOutputCollector.java
@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing 

[24/50] [abbrv] hadoop git commit: YARN-7729. Add support for setting Docker PID namespace mode. (Contributed by Billie Rinaldi)

2018-01-23 Thread sunilg
YARN-7729.  Add support for setting Docker PID namespace mode.  (Contributed by 
Billie Rinaldi)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/97fe3cc1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/97fe3cc1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/97fe3cc1

Branch: refs/heads/YARN-6592
Commit: 97fe3cc187cb9f773777ca79db6f1c7e4d1d5a68
Parents: 22ee6f7
Author: Eric Yang 
Authored: Mon Jan 22 16:33:38 2018 -0500
Committer: Eric Yang 
Committed: Mon Jan 22 16:33:38 2018 -0500

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   8 ++
 .../src/main/resources/yarn-default.xml |   8 ++
 .../runtime/DockerLinuxContainerRuntime.java|  59 +++
 .../linux/runtime/docker/DockerRunCommand.java  |   5 +
 .../container-executor/impl/utils/docker-util.c |  55 ++
 .../container-executor/impl/utils/docker-util.h |   4 +-
 .../test/utils/test_docker_util.cc  |  98 -
 .../runtime/TestDockerContainerRuntime.java | 105 ++-
 .../src/site/markdown/DockerContainers.md   |  10 ++
 9 files changed, 348 insertions(+), 4 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/97fe3cc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 271b666..f132683 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -1831,6 +1831,14 @@ public class YarnConfiguration extends Configuration {
   public static final String DEFAULT_NM_DOCKER_DEFAULT_CONTAINER_NETWORK =
   "host";
 
+  /** Allow host pid namespace for containers. Use with care. */
+  public static final String NM_DOCKER_ALLOW_HOST_PID_NAMESPACE =
+  DOCKER_CONTAINER_RUNTIME_PREFIX + "host-pid-namespace.allowed";
+
+  /** Host pid namespace for containers is disabled by default. */
+  public static final boolean DEFAULT_NM_DOCKER_ALLOW_HOST_PID_NAMESPACE =
+  false;
+
   /**
* Whether or not users are allowed to request that Docker containers honor
* the debug deletion delay. This is useful for troubleshooting Docker

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97fe3cc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index b83673f..a9938c3 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -1678,6 +1678,14 @@
   </property>
 
   <property>
+    <description>This configuration setting determines whether the host's PID
+      namespace is allowed for docker containers on this cluster.
+      Use with care.</description>
+    <name>yarn.nodemanager.runtime.linux.docker.host-pid-namespace.allowed</name>
+    <value>false</value>
+  </property>
+
+  <property>
     <description>Property to enable docker user remapping</description>
     <name>yarn.nodemanager.runtime.linux.docker.enable-userremapping.allowed</name>
     <value>false</value>
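The flag is presumably consumed by DockerLinuxContainerRuntime through the usual Configuration lookup; a minimal sketch of reading it back (the class name PidNsCheck is illustrative only):

import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class PidNsCheck {
  public static void main(String[] args) {
    YarnConfiguration conf = new YarnConfiguration();
    // Defaults to false: containers may not share the host PID namespace
    // unless an operator opts in explicitly.
    boolean hostPidAllowed = conf.getBoolean(
        YarnConfiguration.NM_DOCKER_ALLOW_HOST_PID_NAMESPACE,
        YarnConfiguration.DEFAULT_NM_DOCKER_ALLOW_HOST_PID_NAMESPACE);
    System.out.println("host PID namespace allowed: " + hostPidAllowed);
  }
}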

http://git-wip-us.apache.org/repos/asf/hadoop/blob/97fe3cc1/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
index 6799ce2..f54323c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/DockerLinuxContainerRuntime.java
+++ 

[10/50] [abbrv] hadoop git commit: HDFS-12973. RBF: Document global quota supporting in federation. Contributed by Yiqun Lin.

2018-01-23 Thread sunilg
HDFS-12973. RBF: Document global quota supporting in federation. Contributed by 
Yiqun Lin.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e4f52d3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e4f52d3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e4f52d3

Branch: refs/heads/YARN-6592
Commit: 9e4f52d32319828c153a3ea658520b946988ae31
Parents: bc93ac2
Author: Yiqun Lin 
Authored: Fri Jan 19 14:18:19 2018 +0800
Committer: Yiqun Lin 
Committed: Fri Jan 19 14:18:19 2018 +0800

--
 .../hdfs/tools/federation/RouterAdmin.java  |  3 +-
 .../src/site/markdown/HDFSCommands.md   |  4 +++
 .../src/site/markdown/HDFSRouterFederation.md   | 29 
 3 files changed, 35 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e4f52d3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
index d5a2d77..398288d 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/federation/RouterAdmin.java
@@ -84,7 +84,8 @@ public class RouterAdmin extends Configured implements Tool {
 + "[-readonly] -owner  -group  -mode ]\n"
 + "\t[-rm ]\n"
 + "\t[-ls ]\n"
-+ "\t[-setQuota  -ns  -ss ]\n"
++ "\t[-setQuota  -nsQuota  -ssQuota "
++ "]\n"
 + "\t[-clrQuota \n";
 System.out.println(usage);
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e4f52d3/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 58d9547..71fc834 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -428,12 +428,16 @@ Usage:
   [-add[-readonly] -owner 
 -group  -mode ]
   [-rm ]
   [-ls ]
+  [-setQuota  -nsQuota  -ssQuota ]
+  [-clrQuota ]
 
 | COMMAND\_OPTION | Description |
|:---- |:---- |
 | `-add` *source* *nameservice* *destination* | Add a mount table entry or 
update if it exists. |
 | `-rm` *source* | Remove mount point of specified path. |
 | `-ls` *path* | List mount points under specified path. |
| `-setQuota` *path* `-nsQuota` *nsQuota* `-ssQuota` *ssQuota* | Set quota for 
the specified path. See the [HDFS Quotas Guide](./HdfsQuotaAdminGuide.html) for 
details on quotas. |
| `-clrQuota` *path* | Clear the quota of a given mount point. See the [HDFS 
Quotas Guide](./HdfsQuotaAdminGuide.html) for details on quotas. |
 
 The commands for managing Router-based federation. See [Mount table 
management](./HDFSRouterFederation.html#Mount_table_management) for more info.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e4f52d3/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
index cd3f437..75798a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSRouterFederation.md
@@ -127,6 +127,11 @@ Examples users may encounter include the following.
 * Copy file/folder in two different nameservices.
 * Write into a file/folder being rebalanced.
 
+### Quota management
+Federation supports and controls global quota at the mount table level.
+For performance reasons, the Router caches the quota usage and updates it 
periodically. These cached quota usage values
+are used for quota verification during each WRITE RPC call invoked in 
RouterRPCServer. See the [HDFS Quotas Guide](./HdfsQuotaAdminGuide.html)
+for details on quotas.
 
 ### State Store
 The (logically centralized, but physically distributed) State Store maintains:
@@ -199,6 +204,21 @@ Mount table permission can be set by following command:
 
 The option mode is UNIX-style permissions for the mount table. Permissions are 
specified in octal, e.g. 0755. By default, this is set to 0755.
 
+Router-based federation supports global quota at mount table 

[30/50] [abbrv] hadoop git commit: YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos via wangda)

2018-01-23 Thread sunilg
YARN-6593. [API] Introduce Placement Constraint object. (Konstantinos Karanasos 
via wangda)

Change-Id: Id00edb7185fdf01cce6e40f920cac3585f8cbe9c


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a364a62c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a364a62c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a364a62c

Branch: refs/heads/YARN-6592
Commit: a364a62c7ffd8e01ee9bb183124e52e4899e22fa
Parents: 6347b22
Author: Wangda Tan 
Authored: Thu Aug 3 14:03:55 2017 -0700
Committer: Sunil G 
Committed: Tue Jan 23 15:20:22 2018 +0530

--
 .../yarn/api/resource/PlacementConstraint.java  | 567 +++
 .../yarn/api/resource/PlacementConstraints.java | 286 ++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../src/main/proto/yarn_protos.proto|  55 ++
 .../api/resource/TestPlacementConstraints.java  | 106 
 .../PlacementConstraintFromProtoConverter.java  | 116 
 .../pb/PlacementConstraintToProtoConverter.java | 174 ++
 .../apache/hadoop/yarn/api/pb/package-info.java |  23 +
 .../yarn/api/records/impl/pb/ProtoUtils.java|  27 +
 .../PlacementConstraintTransformations.java | 209 +++
 .../hadoop/yarn/api/resource/package-info.java  |  23 +
 .../TestPlacementConstraintPBConversion.java| 195 +++
 .../TestPlacementConstraintTransformations.java | 183 ++
 13 files changed, 1987 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a364a62c/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
new file mode 100644
index 000..f0e3982
--- /dev/null
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/resource/PlacementConstraint.java
@@ -0,0 +1,567 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.api.resource;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.hadoop.classification.InterfaceAudience.Private;
+import org.apache.hadoop.classification.InterfaceAudience.Public;
+import org.apache.hadoop.classification.InterfaceStability.Unstable;
+
+/**
+ * {@code PlacementConstraint} represents a placement constraint for a resource
+ * allocation.
+ */
+@Public
+@Unstable
+public class PlacementConstraint {
+
+  /**
+   * The constraint expression tree.
+   */
+  private AbstractConstraint constraintExpr;
+
+  public PlacementConstraint(AbstractConstraint constraintExpr) {
+this.constraintExpr = constraintExpr;
+  }
+
+  /**
+   * Get the constraint expression of the placement constraint.
+   *
+   * @return the constraint expression
+   */
+  public AbstractConstraint getConstraintExpr() {
+return constraintExpr;
+  }
+
+  /**
+   * Interface used to enable the elements of the constraint tree to be 
visited.
+   */
+  @Private
+  public interface Visitable {
+/**
+ * Visitor pattern.
+ *
+ * @param visitor visitor to be used
+ * @param <T> defines the type that the visitor will use and the return 
type
+ *  of the accept.
+ * @return the result of visiting a given object.
+ */
+    <T> T accept(Visitor<T> visitor);
+
+  }
+
+  /**
+   * Visitor API for a constraint tree.
+   *
+   * @param <T> determines the return type of the visit methods.
+   */
+  @Private
+  public interface Visitor<T> {
+T visit(SingleConstraint constraint);
+
+T visit(TargetExpression target);
+
+T visit(TargetConstraint constraint);
+
+T visit(CardinalityConstraint constraint);
+
+T visit(And constraint);
+

[49/50] [abbrv] hadoop git commit: YARN-6599. Support anti-affinity constraint via AppPlacementAllocator. (Wangda Tan via asuresh)

2018-01-23 Thread sunilg
YARN-6599. Support anti-affinity constraint via AppPlacementAllocator. (Wangda 
Tan via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4223731
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4223731
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4223731

Branch: refs/heads/YARN-6592
Commit: d422373172c941c0b7c2d011e6bc6c9776551105
Parents: bf3aece
Author: Arun Suresh 
Authored: Thu Jan 18 14:10:30 2018 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:24 2018 +0530

--
 .../v2/app/rm/TestRMContainerAllocator.java |  15 +-
 .../sls/scheduler/SLSCapacityScheduler.java |  15 +-
 .../yarn/sls/scheduler/SLSFairScheduler.java|  12 +-
 .../dev-support/findbugs-exclude.xml|   8 +
 .../yarn/api/resource/PlacementConstraints.java |  43 +-
 .../hadoop/yarn/conf/YarnConfiguration.java |   2 +-
 ...SchedulerInvalidResoureRequestException.java |  47 ++
 .../api/impl/TestAMRMClientOnRMRestart.java |   9 +-
 .../impl/pb/AllocateRequestPBImpl.java  |   1 +
 .../server/scheduler/SchedulerRequestKey.java   |  11 +
 .../resourcemanager/DefaultAMSProcessor.java|  13 +-
 .../rmapp/attempt/RMAppAttemptImpl.java |   5 +-
 .../scheduler/AbstractYarnScheduler.java|   3 +-
 .../scheduler/AppSchedulingInfo.java| 205 +--
 .../ApplicationPlacementAllocatorFactory.java   |  68 +++
 .../scheduler/ApplicationPlacementFactory.java  |  63 ---
 .../scheduler/ContainerUpdateContext.java   |   4 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  20 +-
 .../scheduler/YarnScheduler.java|  15 +-
 .../scheduler/capacity/CapacityScheduler.java   |  54 +-
 .../CapacitySchedulerConfiguration.java |   5 +
 .../allocator/RegularContainerAllocator.java|   3 +-
 .../scheduler/common/ContainerRequest.java  |  12 +
 .../scheduler/common/PendingAsk.java|   6 +
 .../scheduler/common/fica/FiCaSchedulerApp.java |   6 +
 .../constraint/AllocationTagsManager.java   |  71 +--
 .../constraint/AllocationTagsNamespaces.java|  31 --
 .../constraint/PlacementConstraintsUtil.java| 165 --
 .../algorithm/DefaultPlacementAlgorithm.java|   2 +-
 .../processor/PlacementProcessor.java   |   8 +-
 .../scheduler/fair/FairScheduler.java   |  12 +-
 .../scheduler/fifo/FifoScheduler.java   |   7 +-
 .../placement/AppPlacementAllocator.java|  66 ++-
 .../LocalityAppPlacementAllocator.java  |  35 +-
 .../SingleConstraintAppPlacementAllocator.java  | 531 +++
 .../server/resourcemanager/Application.java |   9 +-
 .../yarn/server/resourcemanager/MockAM.java |  51 ++
 .../attempt/TestRMAppAttemptTransitions.java|  10 +-
 .../rmcontainer/TestRMContainerImpl.java|   6 +-
 .../scheduler/TestAppSchedulingInfo.java|   4 +-
 .../capacity/CapacitySchedulerTestBase.java |  79 +++
 .../capacity/TestCapacityScheduler.java |  90 +---
 .../TestCapacitySchedulerAsyncScheduling.java   |   2 +-
 .../TestCapacitySchedulerAutoQueueCreation.java |   2 +-
 ...apacitySchedulerSchedulingRequestUpdate.java | 260 +
 .../capacity/TestIncreaseAllocationExpirer.java |   2 +-
 ...estSchedulingRequestContainerAllocation.java | 277 ++
 ...hedulingRequestContainerAllocationAsync.java | 139 +
 .../scheduler/capacity/TestUtils.java   |   2 +
 .../constraint/TestAllocationTagsManager.java   |  30 +-
 .../TestPlacementConstraintsUtil.java   |  36 +-
 .../scheduler/fair/FairSchedulerTestBase.java   |   6 +-
 .../fair/TestContinuousScheduling.java  |  10 +-
 .../scheduler/fair/TestFairScheduler.java   |  30 +-
 .../scheduler/fifo/TestFifoScheduler.java   |  28 +-
 ...stSingleConstraintAppPlacementAllocator.java | 403 ++
 56 files changed, 2557 insertions(+), 492 deletions(-)
--
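For orientation, the kind of constraint this change lets AppPlacementAllocator satisfy — a minimal anti-affinity sketch built with the PlacementConstraints builders, assuming hadoop-yarn-api from this branch on the classpath:

import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.NODE;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.targetNotIn;
import static org.apache.hadoop.yarn.api.resource.PlacementConstraints.PlacementTargets.allocationTag;

import org.apache.hadoop.yarn.api.resource.PlacementConstraint;

public class AntiAffinityDemo {
  public static void main(String[] args) {
    // Anti-affinity: never co-locate two "hbase-rs" containers on one node.
    PlacementConstraint antiAffinity =
        targetNotIn(NODE, allocationTag("hbase-rs")).build();
    System.out.println(antiAffinity);
  }
}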


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4223731/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
index 85e4181..7875917 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java
+++ 

[13/50] [abbrv] hadoop git commit: HADOOP-15114. Add closeStreams(...) to IOUtils (addendum). Contributed by Ajay Kumar.

2018-01-23 Thread sunilg
HADOOP-15114. Add closeStreams(...) to IOUtils (addendum).
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d689b2d9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d689b2d9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d689b2d9

Branch: refs/heads/YARN-6592
Commit: d689b2d99c7b4d7e587225638dd8f5af0a690dcc
Parents: e5a1ad6
Author: Steve Loughran 
Authored: Fri Jan 19 14:54:13 2018 +
Committer: Steve Loughran 
Committed: Fri Jan 19 14:54:13 2018 +

--
 .../java/org/apache/hadoop/io/TestIOUtils.java  | 21 +---
 1 file changed, 9 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d689b2d9/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
index 2e46c62..467e5bc 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/TestIOUtils.java
@@ -299,13 +299,14 @@ public class TestIOUtils {
   }
 
   @Test
-  public void testCloseStreams() {
-File tmpFile = new File("deleteMe.txt");
-FileOutputStream fos = null;
-BufferedOutputStream bos = null;
+  public void testCloseStreams() throws IOException {
+File tmpFile = null;
+FileOutputStream fos;
+BufferedOutputStream bos;
 FileOutputStream nullStream = null;
 
 try {
+  tmpFile = new File(GenericTestUtils.getTestDir(), 
"testCloseStreams.txt");
   fos = new FileOutputStream(tmpFile) {
 @Override
 public void close() throws IOException {
@@ -315,19 +316,15 @@ public class TestIOUtils {
   bos = new BufferedOutputStream(
   new FileOutputStream(tmpFile)) {
 @Override
-public void close() throws IOException {
+public void close() {
   throw new NullPointerException();
 }
   };
-} catch (IOException ioe) {
-  LOG.warn("Exception in TestIOUtils.testCloseStreams: ", ioe);
-}
-try {
+
   IOUtils.closeStreams(fos, bos, nullStream);
   IOUtils.closeStreams();
-} catch (Exception ex) {
-  LOG.error("Expect IOUtils.closeStreams to close streams quietly.", ex);
-  throw ex;
+} finally {
+  FileUtils.deleteQuietly(tmpFile);
 }
 
   }
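The rewritten test leans on the contract that IOUtils.closeStreams(...) closes quietly, tolerating nulls and swallowing close() failures. A minimal sketch, assuming hadoop-common on the classpath:

import java.io.Closeable;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;

public class CloseStreamsDemo {
  public static void main(String[] args) {
    Closeable noisy = () -> { throw new IOException("boom"); };
    // Neither the throwing close() nor the null argument should escape.
    IOUtils.closeStreams(noisy, null);
    System.out.println("no exception escaped");
  }
}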





[04/50] [abbrv] hadoop git commit: Revert "HADOOP-13974. S3Guard CLI to support list/purge of pending multipart commits."

2018-01-23 Thread sunilg
Revert "HADOOP-13974. S3Guard CLI to support list/purge of pending multipart 
commits."

This reverts commit 35ad9b1dd279b769381ea1625d9bf776c309c5cb.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f274fe33
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f274fe33
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f274fe33

Branch: refs/heads/YARN-6592
Commit: f274fe33ea359d26a31efec42a856320a0dbb5f4
Parents: 08332e1
Author: Steve Loughran 
Authored: Thu Jan 18 12:25:34 2018 +
Committer: Steve Loughran 
Committed: Thu Jan 18 12:35:57 2018 +

--
 .../java/org/apache/hadoop/security/KDiag.java  |  30 +-
 .../java/org/apache/hadoop/fs/s3a/Invoker.java  |   7 +-
 .../apache/hadoop/fs/s3a/MultipartUtils.java| 214 --
 .../org/apache/hadoop/fs/s3a/S3AFileSystem.java |  32 +--
 .../java/org/apache/hadoop/fs/s3a/S3AUtils.java |   3 +-
 .../hadoop/fs/s3a/WriteOperationHelper.java |   5 +-
 .../hadoop/fs/s3a/commit/CommitOperations.java  |   2 +-
 .../fs/s3a/commit/MagicCommitIntegration.java   |   2 +-
 .../hadoop/fs/s3a/s3guard/S3GuardTool.java  | 287 ++-
 .../src/site/markdown/tools/hadoop-aws/index.md |   7 +-
 .../site/markdown/tools/hadoop-aws/s3guard.md   |  35 +--
 .../hadoop/fs/s3a/ITestS3AMultipartUtils.java   | 126 
 .../apache/hadoop/fs/s3a/MockS3AFileSystem.java |   7 -
 .../hadoop/fs/s3a/MultipartTestUtils.java   | 184 
 .../org/apache/hadoop/fs/s3a/S3ATestUtils.java  |  21 +-
 .../fs/s3a/commit/AbstractCommitITest.java  |   3 +-
 .../commit/magic/ITestS3AHugeMagicCommits.java  |   2 +-
 .../fs/s3a/s3guard/ITestS3GuardToolLocal.java   | 187 
 18 files changed, 70 insertions(+), 1084 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f274fe33/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java
index b4e535c..c8d0b33 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java
@@ -81,11 +81,6 @@ public class KDiag extends Configured implements Tool, 
Closeable {
* variable. This is what kinit will use by default: {@value}
*/
   public static final String KRB5_CCNAME = "KRB5CCNAME";
-  /**
-   * Location of main kerberos configuration file as passed down via an
-   * environment variable.
-   */
-  public static final String KRB5_CONFIG = "KRB5_CONFIG";
   public static final String JAVA_SECURITY_KRB5_CONF
 = "java.security.krb5.conf";
   public static final String JAVA_SECURITY_KRB5_REALM
@@ -326,15 +321,14 @@ public class KDiag extends Configured implements Tool, 
Closeable {
 
 title("Environment Variables");
 for (String env : new String[]{
-HADOOP_JAAS_DEBUG,
-KRB5_CCNAME,
-KRB5_CONFIG,
-HADOOP_USER_NAME,
-HADOOP_PROXY_USER,
-HADOOP_TOKEN_FILE_LOCATION,
-"HADOOP_SECURE_LOG",
-"HADOOP_OPTS",
-"HADOOP_CLIENT_OPTS",
+  HADOOP_JAAS_DEBUG,
+  KRB5_CCNAME,
+  HADOOP_USER_NAME,
+  HADOOP_PROXY_USER,
+  HADOOP_TOKEN_FILE_LOCATION,
+  "HADOOP_SECURE_LOG",
+  "HADOOP_OPTS",
+  "HADOOP_CLIENT_OPTS",
 }) {
   printEnv(env);
 }
@@ -568,14 +562,14 @@ public class KDiag extends Configured implements Tool, 
Closeable {
 krbPath = jvmKrbPath;
   }
 
-  String krb5name = System.getenv(KRB5_CONFIG);
+  String krb5name = System.getenv(KRB5_CCNAME);
   if (krb5name != null) {
 println("Setting kerberos path from environment variable %s: \"%s\"",
-KRB5_CONFIG, krb5name);
+  KRB5_CCNAME, krb5name);
 krbPath = krb5name;
 if (jvmKrbPath != null) {
   println("Warning - both %s and %s were set - %s takes priority",
-  JAVA_SECURITY_KRB5_CONF, KRB5_CONFIG, KRB5_CONFIG);
+JAVA_SECURITY_KRB5_CONF, KRB5_CCNAME, KRB5_CCNAME);
 }
   }
 
@@ -925,7 +919,7 @@ public class KDiag extends Configured implements Tool, 
Closeable {
   private void dump(File file) throws IOException {
 try (FileInputStream in = new FileInputStream(file)) {
   for (String line : IOUtils.readLines(in)) {
-println("%s", line);
+println(line);
   }
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f274fe33/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/Invoker.java

[44/50] [abbrv] hadoop git commit: YARN-7788. Factor out management of temp tags from AllocationTagsManager. (Arun Suresh via kkaranasos)

2018-01-23 Thread sunilg
YARN-7788. Factor out management of temp tags from AllocationTagsManager. (Arun 
Suresh via kkaranasos)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc32ecf7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc32ecf7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc32ecf7

Branch: refs/heads/YARN-6592
Commit: dc32ecf76a6536419e59ebb08a481ba014c7d374
Parents: 743b0b3
Author: Konstantinos Karanasos 
Authored: Mon Jan 22 23:51:02 2018 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:24 2018 +0530

--
 .../constraint/AllocationTagsManager.java   | 110 +++-
 .../algorithm/DefaultPlacementAlgorithm.java|   8 +-
 .../algorithm/LocalAllocationTagsManager.java   | 167 +++
 .../constraint/TestAllocationTagsManager.java   |  82 -
 .../TestLocalAllocationTagsManager.java | 139 +++
 5 files changed, 336 insertions(+), 170 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc32ecf7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
index 962e548..7ad5e8c 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/constraint/AllocationTagsManager.java
@@ -24,17 +24,14 @@ import com.google.common.annotations.VisibleForTesting;
 import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.SchedulingRequest;
-import org.apache.hadoop.yarn.api.resource.PlacementConstraints;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.log4j.Logger;
 
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
@@ -61,9 +58,6 @@ public class AllocationTagsManager {
   // Application's tags to Rack
   private Map perAppRackMappings =
   new HashMap<>();
-  // Application's Temporary containers mapping
-  private Map>>
-  appTempMappings = new HashMap<>();
 
   // Global tags to node mapping (used to fast return aggregated tags
   // cardinality across apps)
@@ -76,7 +70,7 @@ public class AllocationTagsManager {
* Currently used both for NodeId to Tag, Count and Rack to Tag, Count
*/
   @VisibleForTesting
-  static class TypeToCountedTags {
+  public static class TypeToCountedTags {
 // Map>
 private Map> typeToTagsWithCount = new HashMap<>();
 
@@ -214,7 +208,7 @@ public class AllocationTagsManager {
   }
 
   @VisibleForTesting
-  Map getPerAppNodeMappings() {
+  public Map getPerAppNodeMappings() {
 return perAppNodeMappings;
   }
 
@@ -233,12 +227,6 @@ public class AllocationTagsManager {
 return globalRackMapping;
   }
 
-  @VisibleForTesting
-  public Map> getAppTempMappings(
-  ApplicationId applicationId) {
-return appTempMappings.get(applicationId);
-  }
-
   public AllocationTagsManager(RMContext context) {
 ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
 readLock = lock.readLock();
@@ -246,39 +234,6 @@ public class AllocationTagsManager {
 rmContext = context;
   }
 
-  //
-
-  /**
-   * Method adds a temporary fake-container tag to Node mapping.
-   * Used by the constrained placement algorithm to keep track of containers
-   * that are currently placed on nodes but are not yet 

[18/50] [abbrv] hadoop git commit: HADOOP-15166 CLI MiniCluster fails with ClassNotFoundException o.a.h.yarn.server.timelineservice.collector.TimelineCollectorManager. Contributed by Gera Shegalov

2018-01-23 Thread sunilg
HADOOP-15166 CLI MiniCluster fails with ClassNotFoundException 
o.a.h.yarn.server.timelineservice.collector.TimelineCollectorManager. 
Contributed by Gera Shegalov


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c191538e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c191538e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c191538e

Branch: refs/heads/YARN-6592
Commit: c191538ed18e12fff157e88a3203b23b20c10d83
Parents: ec8f47e
Author: Vrushali C 
Authored: Fri Jan 19 16:15:55 2018 -0800
Committer: Vrushali C 
Committed: Fri Jan 19 16:15:55 2018 -0800

--
 .../hadoop-common/src/site/markdown/CLIMiniCluster.md.vm   | 2 +-
 hadoop-mapreduce-project/bin/mapred| 6 ++
 2 files changed, 7 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c191538e/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
--
diff --git 
a/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm 
b/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
index 806df0a..9aa9ad2 100644
--- a/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
+++ b/hadoop-common-project/hadoop-common/src/site/markdown/CLIMiniCluster.md.vm
@@ -41,7 +41,7 @@ Running the MiniCluster
 
 From inside the root directory of the extracted tarball, you can start the CLI 
MiniCluster using the following command:
 
-$ 
HADOOP_CLASSPATH=share/hadoop/yarn/test/hadoop-yarn-server-tests-${project.version}-tests.jar
 bin/hadoop jar 
./share/hadoop/mapreduce/hadoop-mapreduce-client-jobclient-${project.version}-tests.jar
 minicluster -rmport RM_PORT -jhsport JHS_PORT
+$ bin/mapred minicluster -rmport RM_PORT -jhsport JHS_PORT
 
 In the example command above, `RM_PORT` and `JHS_PORT` should be replaced by 
the user's choice of these port numbers. If not specified, random free ports 
will be used.
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c191538e/hadoop-mapreduce-project/bin/mapred
--
diff --git a/hadoop-mapreduce-project/bin/mapred 
b/hadoop-mapreduce-project/bin/mapred
index 44f6216..9773ec8 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -36,6 +36,7 @@ function hadoop_usage
   hadoop_add_subcommand "sampler" client "sampler"
   hadoop_add_subcommand "frameworkuploader" admin "mapreduce framework upload"
   hadoop_add_subcommand "version" client "print the version"
+  hadoop_add_subcommand "minicluster" client "CLI MiniCluster"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" true
 }
 
@@ -101,6 +102,11 @@ function mapredcmd_case
 version)
   HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
 ;;
+minicluster)
+  hadoop_add_classpath 
"${HADOOP_YARN_HOME}/${YARN_DIR}/timelineservice"'/*'
+  hadoop_add_classpath "${HADOOP_YARN_HOME}/${YARN_DIR}/test"'/*'
+  HADOOP_CLASSNAME=org.apache.hadoop.mapreduce.MiniHadoopClusterManager
+;;
 *)
   HADOOP_CLASSNAME="${subcmd}"
   if ! hadoop_validate_classname "${HADOOP_CLASSNAME}"; then





[35/50] [abbrv] hadoop git commit: YARN-7612. Add Processor Framework for Rich Placement Constraints. (asuresh)

2018-01-23 Thread sunilg
YARN-7612. Add Processor Framework for Rich Placement Constraints. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/458b9199
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/458b9199
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/458b9199

Branch: refs/heads/YARN-6592
Commit: 458b9199b9b97fa98b724301f36f9197e70efd28
Parents: a5a2e9b
Author: Arun Suresh 
Authored: Fri Dec 22 15:51:20 2017 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:23 2018 +0530

--
 .../hadoop/yarn/conf/YarnConfiguration.java |  26 ++
 .../src/main/resources/yarn-default.xml |  30 ++
 .../ApplicationMasterService.java   |  15 +
 .../rmcontainer/RMContainerImpl.java|   7 +-
 .../scheduler/capacity/CapacityScheduler.java   |   2 +
 .../constraint/processor/BatchedRequests.java   | 105 +
 .../processor/NodeCandidateSelector.java|  38 ++
 .../processor/PlacementDispatcher.java  | 145 +++
 .../processor/PlacementProcessor.java   | 343 
 .../processor/SamplePlacementAlgorithm.java | 144 +++
 .../constraint/processor/package-info.java  |  29 ++
 .../yarn/server/resourcemanager/MockAM.java |  26 ++
 .../yarn/server/resourcemanager/MockRM.java |  14 +
 .../constraint/TestPlacementProcessor.java  | 394 +++
 14 files changed, 1316 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/458b9199/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index f132683..61eb4f8 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -531,6 +531,32 @@ public class YarnConfiguration extends Configuration {
   /** The class to use as the resource scheduler.*/
   public static final String RM_SCHEDULER = 
 RM_PREFIX + "scheduler.class";
+
+  /** Placement Algorithm. */
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS =
+  RM_PREFIX + "placement-constraints.algorithm.class";
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
+  RM_PREFIX + "placement-constraints.enabled";
+
+  public static final boolean DEFAULT_RM_PLACEMENT_CONSTRAINTS_ENABLED = true;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS =
+  RM_PREFIX + "placement-constraints.retry-attempts";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_RETRY_ATTEMPTS = 3;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE =
+  RM_PREFIX + "placement-constraints.algorithm.pool-size";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_ALGORITHM_POOL_SIZE 
=
+  1;
+
+  public static final String RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE =
+  RM_PREFIX + "placement-constraints.scheduler.pool-size";
+
+  public static final int DEFAULT_RM_PLACEMENT_CONSTRAINTS_SCHEDULER_POOL_SIZE 
=
+  1;
  
   public static final String DEFAULT_RM_SCHEDULER = 
   
"org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler";

http://git-wip-us.apache.org/repos/asf/hadoop/blob/458b9199/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index a9938c3..83079f4 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -131,6 +131,36 @@
   </property>
 
   <property>
+    <description>Enable Constraint Placement.</description>
+    <name>yarn.resourcemanager.placement-constraints.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <description>Number of times to retry placing of rejected SchedulingRequests</description>
+    <name>yarn.resourcemanager.placement-constraints.retry-attempts</name>
+    <value>3</value>
+  </property>
+
+  <property>
+    <description>Constraint Placement Algorithm to be used.</description>
+    <name>yarn.resourcemanager.placement-constraints.algorithm.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.SamplePlacementAlgorithm</value>
+  </property>
+
+  <property>
+    <description>Threadpool 

[34/50] [abbrv] hadoop git commit: YARN-6596. Introduce Placement Constraint Manager module. (Konstantinos Karanasos via asuresh)

2018-01-23 Thread sunilg
YARN-6596. Introduce Placement Constraint Manager module. (Konstantinos 
Karanasos via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a5a2e9bc
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a5a2e9bc
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a5a2e9bc

Branch: refs/heads/YARN-6592
Commit: a5a2e9bc8fe65a82ba6e2c7ceb02b158c4a49b54
Parents: 16ccca8
Author: Arun Suresh 
Authored: Fri Dec 22 13:26:30 2017 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:23 2018 +0530

--
 .../resourcemanager/RMActiveServiceContext.java |  15 +
 .../yarn/server/resourcemanager/RMContext.java  |   6 +
 .../server/resourcemanager/RMContextImpl.java   |  13 +
 .../server/resourcemanager/ResourceManager.java |  13 +
 .../MemoryPlacementConstraintManager.java   | 282 +++
 .../constraint/PlacementConstraintManager.java  | 151 ++
 .../PlacementConstraintManagerService.java  |  93 ++
 .../scheduler/constraint/package-info.java  |  29 ++
 .../TestPlacementConstraintManagerService.java  | 182 
 9 files changed, 784 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a2e9bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
index 4d0c230..06a1d00 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMActiveServiceContext.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAlloca
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.AllocationTagsManager;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintManager;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.distributed.QueueLimitCalculator;
 import org.apache.hadoop.yarn.server.resourcemanager.security.AMRMTokenSecretManager;
 import org.apache.hadoop.yarn.server.resourcemanager.security.ClientToAMTokenSecretManagerInRM;
@@ -109,6 +110,7 @@ public class RMActiveServiceContext {
   private RMAppLifetimeMonitor rmAppLifetimeMonitor;
   private QueueLimitCalculator queueLimitCalculator;
   private AllocationTagsManager allocationTagsManager;
+  private PlacementConstraintManager placementConstraintManager;
 
   public RMActiveServiceContext() {
 queuePlacementManager = new PlacementManager();
@@ -413,6 +415,19 @@ public class RMActiveServiceContext {
 
   @Private
   @Unstable
+  public PlacementConstraintManager getPlacementConstraintManager() {
+return placementConstraintManager;
+  }
+
+  @Private
+  @Unstable
+  public void setPlacementConstraintManager(
+  PlacementConstraintManager placementConstraintManager) {
+this.placementConstraintManager = placementConstraintManager;
+  }
+
+  @Private
+  @Unstable
   public RMDelegatedNodeLabelsUpdater getRMDelegatedNodeLabelsUpdater() {
 return rmDelegatedNodeLabelsUpdater;
   }
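Per the file list above, the RM wires the new manager in during active-service initialization and exposes it through this context. A hedged sketch of that wiring (paraphrased from the commit's file list, not the verbatim ResourceManager code; addService and rmContext are assumed in scope):

// Create the in-memory implementation added by this commit, manage its
// lifecycle as an RM service, and publish it via the context:
PlacementConstraintManagerService placementConstraintManager =
    new MemoryPlacementConstraintManager();
addService(placementConstraintManager);
rmContext.setPlacementConstraintManager(placementConstraintManager);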

http://git-wip-us.apache.org/repos/asf/hadoop/blob/a5a2e9bc/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
index 00da108..eb91a31 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMContext.java

[03/50] [abbrv] hadoop git commit: HADOOP-15123. KDiag tries to load krb5.conf from KRB5CCNAME instead of KRB5_CONFIG. Contributed by Vipin Rathor.

2018-01-23 Thread sunilg
HADOOP-15123. KDiag tries to load krb5.conf from KRB5CCNAME instead of KRB5_CONFIG.
Contributed by Vipin Rathor.

(cherry picked from commit 1ef906e29e0989aafcb35c51ad2acbb262b3c8e7)
(cherry picked from commit f61edab1d0ea08b6d752ecdfb6068103822012ec)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/de630708
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/de630708
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/de630708

Branch: refs/heads/YARN-6592
Commit: de630708d1912b3e4fa31e00f5d84a08a580e763
Parents: f274fe3
Author: Steve Loughran 
Authored: Thu Jan 18 12:29:36 2018 +
Committer: Steve Loughran 
Committed: Thu Jan 18 12:35:57 2018 +

--
 .../java/org/apache/hadoop/security/KDiag.java  | 30 
 1 file changed, 18 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/de630708/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java
index c8d0b33..b4e535c 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/KDiag.java
@@ -81,6 +81,11 @@ public class KDiag extends Configured implements Tool, Closeable {
* variable. This is what kinit will use by default: {@value}
*/
   public static final String KRB5_CCNAME = "KRB5CCNAME";
+  /**
+   * Location of main kerberos configuration file as passed down via an
+   * environment variable.
+   */
+  public static final String KRB5_CONFIG = "KRB5_CONFIG";
   public static final String JAVA_SECURITY_KRB5_CONF
 = "java.security.krb5.conf";
   public static final String JAVA_SECURITY_KRB5_REALM
@@ -321,14 +326,15 @@ public class KDiag extends Configured implements Tool, Closeable {
 
 title("Environment Variables");
 for (String env : new String[]{
-  HADOOP_JAAS_DEBUG,
-  KRB5_CCNAME,
-  HADOOP_USER_NAME,
-  HADOOP_PROXY_USER,
-  HADOOP_TOKEN_FILE_LOCATION,
-  "HADOOP_SECURE_LOG",
-  "HADOOP_OPTS",
-  "HADOOP_CLIENT_OPTS",
+HADOOP_JAAS_DEBUG,
+KRB5_CCNAME,
+KRB5_CONFIG,
+HADOOP_USER_NAME,
+HADOOP_PROXY_USER,
+HADOOP_TOKEN_FILE_LOCATION,
+"HADOOP_SECURE_LOG",
+"HADOOP_OPTS",
+"HADOOP_CLIENT_OPTS",
 }) {
   printEnv(env);
 }
@@ -562,14 +568,14 @@ public class KDiag extends Configured implements Tool, Closeable {
 krbPath = jvmKrbPath;
   }
 
-  String krb5name = System.getenv(KRB5_CCNAME);
+  String krb5name = System.getenv(KRB5_CONFIG);
   if (krb5name != null) {
 println("Setting kerberos path from environment variable %s: \"%s\"",
-  KRB5_CCNAME, krb5name);
+KRB5_CONFIG, krb5name);
 krbPath = krb5name;
 if (jvmKrbPath != null) {
   println("Warning - both %s and %s were set - %s takes priority",
-JAVA_SECURITY_KRB5_CONF, KRB5_CCNAME, KRB5_CCNAME);
+  JAVA_SECURITY_KRB5_CONF, KRB5_CONFIG, KRB5_CONFIG);
 }
   }
 
@@ -919,7 +925,7 @@ public class KDiag extends Configured implements Tool, Closeable {
   private void dump(File file) throws IOException {
 try (FileInputStream in = new FileInputStream(file)) {
   for (String line : IOUtils.readLines(in)) {
-println(line);
+println("%s", line);
   }
 }
   }
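The distinction behind the fix: KRB5CCNAME points at the Kerberos credential cache, while KRB5_CONFIG points at the krb5.conf configuration file, and only the latter should drive the config-path diagnostics. A standalone sketch of the corrected lookup order (illustrative, not the KDiag code itself):

/** Minimal illustration of the corrected krb5.conf lookup order. */
public final class Krb5ConfResolver {
  public static void main(String[] args) {
    // JVM property first; the KRB5_CONFIG environment variable overrides it.
    String krbPath = System.getProperty("java.security.krb5.conf");
    String envConf = System.getenv("KRB5_CONFIG"); // config file, not the cache
    if (envConf != null) {
      krbPath = envConf;
    }
    System.out.println("krb5.conf resolved to: " + krbPath);
  }
}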





[09/50] [abbrv] hadoop git commit: YARN-7139. FairScheduler: finished applications are always restored to default queue. Contributed by Wilfred Spiegelenburg.

2018-01-23 Thread sunilg
YARN-7139. FairScheduler: finished applications are always restored to default queue. Contributed by Wilfred Spiegelenburg.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/bc93ac22
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/bc93ac22
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/bc93ac22

Branch: refs/heads/YARN-6592
Commit: bc93ac229e17b1be440052217e51820b95c179ec
Parents: 37f4696
Author: Miklos Szegedi 
Authored: Thu Jan 18 16:03:53 2018 -0800
Committer: Miklos Szegedi 
Committed: Thu Jan 18 17:43:47 2018 -0800

--
 .../scheduler/fair/FairScheduler.java   | 15 ++--
 .../ParameterizedSchedulerTestBase.java |  8 
 .../TestWorkPreservingRMRestart.java| 39 
 .../scheduler/fair/FairSchedulerTestBase.java   | 22 +++
 .../scheduler/fair/TestFSAppAttempt.java|  8 ++--
 5 files changed, 77 insertions(+), 15 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc93ac22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index b31ab07..e2a62ec 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -495,15 +495,22 @@ public class FairScheduler extends
   applications.put(applicationId, application);
   queue.getMetrics().submitApp(user);
 
-LOG.info("Accepted application " + applicationId + " from user: " + user
-+ ", in queue: " + queue.getName()
-+ ", currently num of applications: " + applications.size());
+  LOG.info("Accepted application " + applicationId + " from user: " + user
+  + ", in queue: " + queue.getName()
+  + ", currently num of applications: " + applications.size());
   if (isAppRecovering) {
 if (LOG.isDebugEnabled()) {
   LOG.debug(applicationId
   + " is recovering. Skip notifying APP_ACCEPTED");
 }
-  } else{
+  } else {
+// During tests we do not always have an application object; handle
+// that here, but we should probably fix the tests.
+if (rmApp != null && rmApp.getApplicationSubmissionContext() != null) {
+  // Before we send out the APP_ACCEPTED event, set the queue in the
+  // submissionContext (needed on restore etc.)
+  rmApp.getApplicationSubmissionContext().setQueue(queue.getName());
+}
 rmContext.getDispatcher().getEventHandler().handle(
 new RMAppEvent(applicationId, RMAppEventType.APP_ACCEPTED));
   }
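For context: work-preserving restart resubmits each application using the queue stored in its persisted ApplicationSubmissionContext, which is why the resolved queue has to be written back before APP_ACCEPTED. A hedged fragment of the recovery-side read (rmApp, scheduler, appId, and user are assumed in scope; this paraphrases the RM recovery path, not the verbatim code):

// On RM restart the app is re-added with the queue from the stored context.
// Before this fix the stored queue was still the submit-time value, so apps
// placed by queue-placement rules were restored to "default".
String restoredQueue = rmApp.getApplicationSubmissionContext().getQueue();
scheduler.addApplication(appId, restoredQueue, user, true /* isAppRecovering */);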

http://git-wip-us.apache.org/repos/asf/hadoop/blob/bc93ac22/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
index 9a29a89..4de16dc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ParameterizedSchedulerTestBase.java
@@ -26,6 +26,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
 import 

[38/50] [abbrv] hadoop git commit: YARN-7613. Implement Basic algorithm for constraint based placement. (Panagiotis Garefalakis via asuresh)

2018-01-23 Thread sunilg
YARN-7613. Implement Basic algorithm for constraint based placement. (Panagiotis Garefalakis via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f86d7ca0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f86d7ca0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f86d7ca0

Branch: refs/heads/YARN-6592
Commit: f86d7ca05473b2b35112053e4c1d99d3d1d16e0f
Parents: 458b919
Author: Arun Suresh 
Authored: Wed Dec 27 22:59:22 2017 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:23 2018 +0530

--
 .../hadoop/yarn/conf/YarnConfiguration.java |   4 +
 .../src/main/resources/yarn-default.xml |   8 +-
 .../rmcontainer/RMContainerImpl.java|  10 +-
 .../constraint/AllocationTagsManager.java   | 121 ++---
 .../algorithm/DefaultPlacementAlgorithm.java| 172 +++
 .../iterators/PopularTagsIterator.java  |  71 
 .../algorithm/iterators/SerialIterator.java |  53 ++
 .../algorithm/iterators/package-info.java   |  29 
 .../constraint/algorithm/package-info.java  |  29 
 .../constraint/processor/BatchedRequests.java   |  45 -
 .../processor/PlacementProcessor.java   |  32 ++--
 .../processor/SamplePlacementAlgorithm.java | 144 
 .../constraint/TestAllocationTagsManager.java   | 156 -
 .../TestBatchedRequestsIterators.java   |  82 +
 .../constraint/TestPlacementProcessor.java  |   4 +-
 15 files changed, 721 insertions(+), 239 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f86d7ca0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index 61eb4f8..bfda5bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -536,6 +536,10 @@ public class YarnConfiguration extends Configuration {
   public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_CLASS =
   RM_PREFIX + "placement-constraints.algorithm.class";
 
+  /** Used for BasicPlacementAlgorithm - default SERIAL. **/
+  public static final String RM_PLACEMENT_CONSTRAINTS_ALGORITHM_ITERATOR =
+  RM_PREFIX + "placement-constraints.algorithm.iterator";
+
   public static final String RM_PLACEMENT_CONSTRAINTS_ENABLED =
   RM_PREFIX + "placement-constraints.enabled";
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f86d7ca0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 83079f4..33a5f0c 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -145,7 +145,13 @@
   <property>
     <description>Constraint Placement Algorithm to be used.</description>
     <name>yarn.resourcemanager.placement-constraints.algorithm.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.processor.SamplePlacementAlgorithm</value>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.algorithm.DefaultPlacementAlgorithm</value>
+  </property>
+
+  <property>
+    <description>Placement Algorithm Requests Iterator to be used.</description>
+    <name>yarn.resourcemanager.placement-constraints.algorithm.iterator</name>
+    <value>SERIAL</value>
   </property>
 
   <property>

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f86d7ca0/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java
index c873509..2c4ef7b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java

[12/50] [abbrv] hadoop git commit: HADOOP-14788. Credentials readTokenStorageFile to stop wrapping IOEs in IOEs. Contributed by Ajay Kumar.

2018-01-23 Thread sunilg
HADOOP-14788. Credentials readTokenStorageFile to stop wrapping IOEs in IOEs.
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e5a1ad6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e5a1ad6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e5a1ad6e

Branch: refs/heads/YARN-6592
Commit: e5a1ad6e24807b166a40d1332c889c2c4cb4c733
Parents: c5bbd64
Author: Steve Loughran 
Authored: Fri Jan 19 14:48:45 2018 +
Committer: Steve Loughran 
Committed: Fri Jan 19 14:48:45 2018 +

--
 .../main/java/org/apache/hadoop/io/IOUtils.java | 56 
 .../org/apache/hadoop/security/Credentials.java |  3 +-
 .../java/org/apache/hadoop/io/TestIOUtils.java  | 25 -
 3 files changed, 82 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5a1ad6e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
index 4684fb6..f451ff3 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/IOUtils.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.io;
 
 import java.io.*;
+import java.lang.reflect.Constructor;
 import java.net.Socket;
 import java.nio.ByteBuffer;
 import java.nio.channels.FileChannel;
@@ -35,6 +36,7 @@ import org.apache.commons.logging.Log;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.PathIOException;
 import org.apache.hadoop.util.Shell;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -450,4 +452,58 @@ public class IOUtils {
   throw ioe;
 }
   }
+
+  /**
+   * Takes an IOException, a file/directory path, and a method name and
+   * returns an IOException with the input exception as the cause, also
+   * including the file and method details. The new exception provides the
+   * stack trace of the place where the exception is thrown and some extra
+   * diagnostics information.
+   *
+   * Returns an instance of the same exception class if that class has a
+   * public (String) constructor; otherwise returns a PathIOException.
+   * InterruptedIOException and PathIOException are returned unwrapped.
+   *
+   * @param path file/directory path
+   * @param methodName method name
+   * @param exception the caught exception.
+   * @return an exception to throw
+   */
+  public static IOException wrapException(final String path,
+  final String methodName, final IOException exception) {
+
+if (exception instanceof InterruptedIOException
+|| exception instanceof PathIOException) {
+  return exception;
+} else {
+  String msg = String
+  .format("Failed with %s while processing file/directory :[%s] in "
+  + "method:[%s]",
+  exception.getClass().getName(), path, methodName);
+  try {
+return wrapWithMessage(exception, msg);
+  } catch (Exception ex) {
+// For subclasses which have no (String) constructor throw IOException
+// with wrapped message
+
+return new PathIOException(path, exception);
+  }
+}
+  }
+
+  @SuppressWarnings("unchecked")
+  private static <T extends IOException> T wrapWithMessage(
+  final T exception, final String msg) throws T {
+Class<? extends Throwable> clazz = exception.getClass();
+try {
+  Constructor<? extends Throwable> ctor = clazz
+  .getConstructor(String.class);
+  Throwable t = ctor.newInstance(msg);
+  return (T) (t.initCause(exception));
+} catch (Throwable e) {
+  LOG.warn("Unable to wrap exception of type " +
+  clazz + ": it has no (String) constructor", e);
+  throw exception;
+}
+  }
 }
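A short usage sketch of the new helper; TokenFileReader and readFirstLong are hypothetical, but the call mirrors the wrap-and-rethrow pattern Credentials switches to below:

import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.io.IOUtils;

/** Hypothetical caller showing the wrap-and-rethrow pattern. */
public final class TokenFileReader {
  public static long readFirstLong(File file) throws IOException {
    try (DataInputStream in = new DataInputStream(new FileInputStream(file))) {
      return in.readLong();
    } catch (IOException e) {
      // The same exception class is rethrown with file/method context when a
      // public (String) constructor exists; otherwise a PathIOException.
      // InterruptedIOException and PathIOException pass through unwrapped.
      throw IOUtils.wrapException(file.getPath(), "readFirstLong", e);
    }
  }
}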

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e5a1ad6e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
--
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
index 3e51249..6a9527a 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java
@@ -207,7 +207,8 @@ public class Credentials 

[08/50] [abbrv] hadoop git commit: YARN-7740. Fix logging for destroy yarn service cli when app does not exist and some minor bugs. Contributed by Jian He

2018-01-23 Thread sunilg
YARN-7740. Fix logging for destroy yarn service cli when app does not exist and some minor bugs. Contributed by Jian He


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37f4696a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37f4696a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37f4696a

Branch: refs/heads/YARN-6592
Commit: 37f4696a9cc9284b242215f56a10990e1028d40c
Parents: 06cceba
Author: Billie Rinaldi 
Authored: Thu Jan 18 12:11:19 2018 -0800
Committer: Billie Rinaldi 
Committed: Thu Jan 18 12:11:19 2018 -0800

--
 .../hadoop/yarn/service/ServiceScheduler.java   |  2 +-
 .../yarn/service/client/ServiceClient.java  | 37 
 .../provider/AbstractClientProvider.java|  4 +--
 .../service/utils/ServiceRegistryUtils.java | 37 ++--
 .../yarn/service/TestYarnNativeServices.java|  7 ++--
 .../container/ContainerImpl.java|  4 ++-
 6 files changed, 52 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37f4696a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
index eb4783f..6cf4e14 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/ServiceScheduler.java
@@ -399,7 +399,7 @@ public class ServiceScheduler extends CompositeService {
   LOG.error("Failed to get user.", e);
 }
 globalTokens
-.put(SERVICE_ZK_PATH, ServiceRegistryUtils.mkClusterPath(user, app.getName()));
+.put(SERVICE_ZK_PATH, ServiceRegistryUtils.mkServiceHomePath(user, app.getName()));
 
 globalTokens.put(ServiceApiConstants.USER, user);
 String dnsDomain = getConfig().getTrimmed(KEY_DNS_DOMAIN);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/37f4696a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
index bf46d15..c224089 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-services/hadoop-yarn-services-core/src/main/java/org/apache/hadoop/yarn/service/client/ServiceClient.java
@@ -433,6 +433,7 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
 FileSystem fileSystem = fs.getFileSystem();
 // remove from the appId cache
 cachedAppInfo.remove(serviceName);
+boolean destroySucceed = true;
 if (fileSystem.exists(appDir)) {
   if (fileSystem.delete(appDir, true)) {
 LOG.info("Successfully deleted service dir for " + serviceName + ": "
@@ -443,20 +444,37 @@ public class ServiceClient extends AppAdminClient implements SliderExitCodes,
 LOG.info(message);
 throw new YarnException(message);
   }
+} else {
+  LOG.info("Service '" + serviceName + "' doesn't exist at hdfs path: "
+  + appDir);
+  destroySucceed = false;
 }
 try {
   deleteZKNode(serviceName);
 } catch (Exception e) {
   throw new IOException("Could not delete zk node for " + serviceName, e);
 }
-String registryPath = ServiceRegistryUtils.registryPathForInstance(serviceName);
+String registryPath =
+ServiceRegistryUtils.registryPathForInstance(serviceName);
 try {
-  getRegistryClient().delete(registryPath, true);
+  if 

[32/50] [abbrv] hadoop git commit: YARN-7669. API and interface modifications for placement constraint processor. (asuresh)

2018-01-23 Thread sunilg
YARN-7669. API and interface modifications for placement constraint processor. (asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c94ed6f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c94ed6f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c94ed6f9

Branch: refs/heads/YARN-6592
Commit: c94ed6f9eaa0103ccd623bf4b66309d1b55516ba
Parents: 67d1958
Author: Arun Suresh 
Authored: Tue Dec 19 22:47:46 2017 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:23 2018 +0530

--
 .../yarn/ams/ApplicationMasterServiceUtils.java |  16 +
 .../api/protocolrecords/AllocateResponse.java   |  23 +
 .../api/records/RejectedSchedulingRequest.java  |  70 +++
 .../yarn/api/records/RejectionReason.java   |  44 ++
 .../src/main/proto/yarn_protos.proto|  10 +
 .../src/main/proto/yarn_service_protos.proto|   1 +
 .../impl/pb/AllocateResponsePBImpl.java |  85 
 .../yarn/api/records/impl/pb/ProtoUtils.java|  16 +
 .../pb/RejectedSchedulingRequestPBImpl.java | 148 +++
 .../records/impl/pb/ResourceSizingPBImpl.java   |   8 +
 .../impl/pb/SchedulingRequestPBImpl.java|  11 +
 .../hadoop/yarn/api/TestPBImplRecords.java  |   2 +
 .../resourcemanager/RMActiveServiceContext.java |   2 +-
 .../yarn/server/resourcemanager/RMContext.java  |   2 +-
 .../server/resourcemanager/RMContextImpl.java   |   2 +-
 .../server/resourcemanager/ResourceManager.java |   2 +-
 .../constraint/AllocationTagsManager.java   | 431 ---
 .../constraint/AllocationTagsNamespaces.java|  31 --
 .../InvalidAllocationTagsQueryException.java|  35 --
 .../constraint/AllocationTagsManager.java   | 431 +++
 .../constraint/AllocationTagsNamespaces.java|  31 ++
 .../InvalidAllocationTagsQueryException.java|  35 ++
 .../api/ConstraintPlacementAlgorithm.java   |  43 ++
 .../api/ConstraintPlacementAlgorithmInput.java  |  32 ++
 .../api/ConstraintPlacementAlgorithmOutput.java |  58 +++
 ...traintPlacementAlgorithmOutputCollector.java |  32 ++
 .../constraint/api/PlacedSchedulingRequest.java |  79 
 .../constraint/api/SchedulingResponse.java  |  70 +++
 .../scheduler/constraint/api/package-info.java  |  28 ++
 .../constraint/TestAllocationTagsManager.java   | 328 --
 .../rmcontainer/TestRMContainerImpl.java|   2 +-
 .../scheduler/capacity/TestUtils.java   |   2 +-
 .../constraint/TestAllocationTagsManager.java   | 328 ++
 .../scheduler/fifo/TestFifoScheduler.java   |   2 +-
 34 files changed, 1608 insertions(+), 832 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c94ed6f9/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
index 476da8b..8bdfaf3 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/ams/ApplicationMasterServiceUtils.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.yarn.ams;
 import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
 import org.apache.hadoop.yarn.api.records.Container;
 import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
+import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
 import org.apache.hadoop.yarn.api.records.UpdateContainerError;
 import org.apache.hadoop.yarn.api.records.UpdatedContainer;
 
@@ -86,4 +87,19 @@ public final class ApplicationMasterServiceUtils {
 }
 allocateResponse.setAllocatedContainers(allocatedContainers);
   }
+
+  /**
+   * Add rejected Scheduling Requests to {@link AllocateResponse}.
+   * @param allocateResponse Allocate Response.
+   * @param rejectedRequests Rejected SchedulingRequests.
+   */
+  public static void addToRejectedSchedulingRequests(
+  AllocateResponse allocateResponse,
+  List<RejectedSchedulingRequest> rejectedRequests) {
+if (allocateResponse.getRejectedSchedulingRequests() != null
+&& !allocateResponse.getRejectedSchedulingRequests().isEmpty()) {
+  rejectedRequests.addAll(allocateResponse.getRejectedSchedulingRequests());
+}
+allocateResponse.setRejectedSchedulingRequests(rejectedRequests);
+  }
 }
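A hedged usage sketch of the helper; ResponsePlumbing and reportRejected are hypothetical, while the record factory calls follow the new classes in the file list above:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.yarn.ams.ApplicationMasterServiceUtils;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.RejectedSchedulingRequest;
import org.apache.hadoop.yarn.api.records.RejectionReason;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;

/** Hypothetical RM-side plumbing that reports an unplaceable request. */
public final class ResponsePlumbing {
  static void reportRejected(AllocateResponse response,
      SchedulingRequest request) {
    // Merge the newly rejected request into the response so the AM can react;
    // the helper preserves any rejections already recorded on the response.
    List<RejectedSchedulingRequest> rejected = new ArrayList<>();
    rejected.add(RejectedSchedulingRequest.newInstance(
        RejectionReason.COULD_NOT_PLACE_ON_NODE, request));
    ApplicationMasterServiceUtils.addToRejectedSchedulingRequests(
        response, rejected);
  }
}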


[40/50] [abbrv] hadoop git commit: YARN-7681. Double-check placement constraints in scheduling phase before actual allocation is made. (Weiwei Yang via asuresh)

2018-01-23 Thread sunilg
YARN-7681. Double-check placement constraints in scheduling phase before actual allocation is made. (Weiwei Yang via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8c9d8de8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8c9d8de8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8c9d8de8

Branch: refs/heads/YARN-6592
Commit: 8c9d8de88d7842b7b18087fa2980837cdac1fe9a
Parents: cc54485
Author: Arun Suresh 
Authored: Wed Jan 10 09:04:30 2018 -0800
Committer: Sunil G 
Committed: Tue Jan 23 15:20:23 2018 +0530

--
 .../scheduler/capacity/CapacityScheduler.java   | 23 
 1 file changed, 23 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8c9d8de8/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
--
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
index 28b3689..2abadfd 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java
@@ -122,6 +122,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.ResourceCo
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.SchedulerContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerNode;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.InvalidAllocationTagsQueryException;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.constraint.PlacementConstraintsUtil;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptAddedSchedulerEvent;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.AppAttemptRemovedSchedulerEvent;
@@ -2514,6 +2516,27 @@ public class CapacityScheduler extends
 ResourceCommitRequest<FiCaSchedulerApp, FiCaSchedulerNode>
 resourceCommitRequest = createResourceCommitRequest(
 appAttempt, schedulingRequest, schedulerNode);
+
+// Validate placement constraint is satisfied before
+// committing the request.
+try {
+  if (!PlacementConstraintsUtil.canSatisfyConstraints(
+  appAttempt.getApplicationId(),
+  schedulingRequest.getAllocationTags(),
+  schedulerNode,
+  rmContext.getPlacementConstraintManager(),
+  rmContext.getAllocationTagsManager())) {
+LOG.debug("Failed to allocate container for application "
++ appAttempt.getApplicationId() + " on node "
++ schedulerNode.getNodeName()
++ " because this allocation violates the"
++ " placement constraint.");
+return false;
+  }
+} catch (InvalidAllocationTagsQueryException e) {
+  LOG.warn("Unable to allocate container", e);
+  return false;
+}
 return tryCommit(getClusterResource(), resourceCommitRequest, false);
   }
 }




