hadoop git commit: HDDS-228. Add the ReplicaMaps to ContainerStateManager. Contributed by Ajay Kumar.

2018-07-12 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk a08812a1b -> 5ee90efed


HDDS-228. Add the ReplicaMaps to ContainerStateManager.
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5ee90efe
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5ee90efe
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5ee90efe

Branch: refs/heads/trunk
Commit: 5ee90efed385db4bf235816145b30a0f691fc91b
Parents: a08812a
Author: Anu Engineer 
Authored: Thu Jul 12 10:43:24 2018 -0700
Committer: Anu Engineer 
Committed: Thu Jul 12 10:43:24 2018 -0700

--
 .../scm/container/ContainerStateManager.java| 34 
 .../scm/container/states/ContainerStateMap.java | 86 
 .../container/TestContainerStateManager.java| 79 ++
 3 files changed, 199 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ee90efe/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
index 870ab1d..223deac 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdds.scm.container;
 
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import 
org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
@@ -488,4 +489,37 @@ public class ContainerStateManager implements Closeable {
   public void close() throws IOException {
   }
 
+  /**
+   * Returns the latest set of DataNodes where replicas for the given
+   * containerID exist. Throws an SCMException if no entry is found for
+   * the given containerID.
+   *
+   * @param containerID
+   * @return Set<DatanodeDetails>
+   */
+  public Set<DatanodeDetails> getContainerReplicas(ContainerID containerID)
+      throws SCMException {
+    return containers.getContainerReplicas(containerID);
+  }
+
+  /**
+   * Add a container replica for the given DataNode.
+   *
+   * @param containerID
+   * @param dn
+   */
+  public void addContainerReplica(ContainerID containerID,
+      DatanodeDetails dn) {
+    containers.addContainerReplica(containerID, dn);
+  }
+
+  /**
+   * Remove a container replica for the given DataNode.
+   *
+   * @param containerID
+   * @param dn
+   * @return True if the replica is removed successfully, else false.
+   */
+  public boolean removeContainerReplica(ContainerID containerID,
+      DatanodeDetails dn) throws SCMException {
+    return containers.removeContainerReplica(containerID, dn);
+  }
 }
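For orientation, a minimal sketch (not part of the commit) of how the
replica-map methods added above might be exercised. It assumes an already
constructed ContainerStateManager named stateManager, two DatanodeDetails
instances dn1 and dn2, and the ContainerID.valueof(long) factory from the
HDDS code of this period:

    // Illustration only: exercise the new replica tracking API.
    ContainerID id = ContainerID.valueof(1L);
    stateManager.addContainerReplica(id, dn1);
    stateManager.addContainerReplica(id, dn2);
    // Throws SCMException if no replica entry exists for the given id.
    Set<DatanodeDetails> replicas = stateManager.getContainerReplicas(id);
    // replicas now contains dn1 and dn2; removal reports success/failure.
    boolean removed = stateManager.removeContainerReplica(id, dn1);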

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5ee90efe/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
index c23b1fd..1c92861 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/states/ContainerStateMap.java
@@ -18,13 +18,18 @@
 
 package org.apache.hadoop.hdds.scm.container.states;
 
+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import java.util.HashSet;
+import java.util.Set;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.container.ContainerID;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes;
 import org.apache.hadoop.util.AutoCloseableLock;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -83,6 +88,8 @@ public class ContainerStateMap {
   private final ContainerAttribute typeMap;
 
   

hadoop git commit: HDFS-13729. Fix broken links to RBF documentation. Contributed by Gabor Bota.

2018-07-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fbe719281 -> ff46db2ea


HDFS-13729. Fix broken links to RBF documentation. Contributed by Gabor Bota.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff46db2e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff46db2e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff46db2e

Branch: refs/heads/branch-2
Commit: ff46db2ea6fcf3eb5f6a3cfb54af79e3d1ce1632
Parents: fbe7192
Author: Akira Ajisaka 
Authored: Thu Jul 12 13:53:08 2018 -0400
Committer: Akira Ajisaka 
Committed: Thu Jul 12 13:53:08 2018 -0400

--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff46db2e/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index a58c6c8..e5b94d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -402,7 +402,7 @@ Runs a HDFS dfsadmin client.
 
 Usage: `hdfs dfsrouter`
 
-Runs the DFS router. See [Router](./HDFSRouterFederation.html#Router) for more 
info.
+Runs the DFS router. See 
[Router](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Router) for more info.
 
 ### `dfsrouteradmin`
 
@@ -431,7 +431,7 @@ Usage:
 | `-nameservice` `disable` `enable` *nameservice* | Disable/enable  a name 
service from the federation. If disabled, requests will not go to that name 
service. |
 | `-getDisabledNameservices` | Get the name services that are disabled in the 
federation. |
 
-The commands for managing Router-based federation. See [Mount table 
management](./HDFSRouterFederation.html#Mount_table_management) for more info.
+The commands for managing Router-based federation. See [Mount table 
management](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Mount_table_management)
 for more info.
 
 ### `haadmin`
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13729. Fix broken links to RBF documentation. Contributed by Gabor Bota.

2018-07-12 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.9 bf0267481 -> 8d14f9b07


HDFS-13729. Fix broken links to RBF documentation. Contributed by Gabor Bota.

(cherry picked from commit ff46db2ea6fcf3eb5f6a3cfb54af79e3d1ce1632)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8d14f9b0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8d14f9b0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8d14f9b0

Branch: refs/heads/branch-2.9
Commit: 8d14f9b07d65eef544fc6b68d913138bbb63bafc
Parents: bf02674
Author: Akira Ajisaka 
Authored: Thu Jul 12 13:53:08 2018 -0400
Committer: Akira Ajisaka 
Committed: Thu Jul 12 13:53:35 2018 -0400

--
 .../hadoop-hdfs/src/site/markdown/HDFSCommands.md| 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8d14f9b0/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index f801b01..1cea79f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -402,7 +402,7 @@ Runs a HDFS dfsadmin client.
 
 Usage: `hdfs dfsrouter`
 
-Runs the DFS router. See [Router](./HDFSRouterFederation.html#Router) for more 
info.
+Runs the DFS router. See 
[Router](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Router) for more info.
 
 ### `dfsrouteradmin`
 
@@ -427,7 +427,7 @@ Usage:
 | `-nameservice` `disable` `enable` *nameservice* | Disable/enable  a name 
service from the federation. If disabled, requests will not go to that name 
service. |
 | `-getDisabledNameservices` | Get the name services that are disabled in the 
federation. |
 
-The commands for managing Router-based federation. See [Mount table 
management](./HDFSRouterFederation.html#Mount_table_management) for more info.
+The commands for managing Router-based federation. See [Mount table 
management](../hadoop-hdfs-rbf/HDFSRouterFederation.html#Mount_table_management)
 for more info.
 
 ### `haadmin`
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-234. Add SCM node report handler. Contributed by Ajay Kumar.

2018-07-12 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 5ee90efed -> 556d9b36b


HDDS-234. Add SCM node report handler.
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/556d9b36
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/556d9b36
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/556d9b36

Branch: refs/heads/trunk
Commit: 556d9b36be4b0b759646b8f6030c9e693b97bdb8
Parents: 5ee90ef
Author: Anu Engineer 
Authored: Thu Jul 12 12:09:31 2018 -0700
Committer: Anu Engineer 
Committed: Thu Jul 12 12:09:31 2018 -0700

--
 .../hadoop/hdds/scm/node/NodeManager.java   |  9 ++
 .../hadoop/hdds/scm/node/NodeReportHandler.java | 19 +++-
 .../hadoop/hdds/scm/node/SCMNodeManager.java| 11 +++
 .../hdds/scm/container/MockNodeManager.java | 11 +++
 .../hdds/scm/node/TestNodeReportHandler.java| 95 
 .../testutils/ReplicationNodeManagerMock.java   | 10 +++
 6 files changed, 152 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/556d9b36/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
index 5e2969d..deb1628 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeManager.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdds.scm.node;
 
+import 
org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeMetric;
 import org.apache.hadoop.hdds.scm.container.placement.metrics.SCMNodeStat;
 import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
@@ -138,4 +139,12 @@ public interface NodeManager extends StorageContainerNodeProtocol,
* @param command
*/
   void addDatanodeCommand(UUID dnId, SCMCommand command);
+
+  /**
+   * Process node report.
+   *
+   * @param dnUuid
+   * @param nodeReport
+   */
+  void processNodeReport(UUID dnUuid, NodeReportProto nodeReport);
 }
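A minimal sketch of what an implementation of the new interface method might
look like. This is illustrative only (the commit's SCMNodeManager and mock
changes appear in the change stats above but are not quoted here), and
updateNodeStat is a hypothetical helper:

    @Override
    public void processNodeReport(UUID dnUuid, NodeReportProto nodeReport) {
      // Fold the report's storage entries into this node's statistics.
      // updateNodeStat is hypothetical; the real implementation lives in
      // SCMNodeManager.java, listed in the change summary above.
      updateNodeStat(dnUuid, nodeReport);
    }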

http://git-wip-us.apache.org/repos/asf/hadoop/blob/556d9b36/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
index aa78d53..331bfed 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/NodeReportHandler.java
@@ -7,7 +7,7 @@
  * "License"); you may not use this file except in compliance
  * with the License.  You may obtain a copy of the License at
  *
- * http://www.apache.org/licenses/LICENSE-2.0
+ * http://www.apache.org/licenses/LICENSE-2.0
  *
  * Unless required by applicable law or agreed to in writing, software
  * distributed under the License is distributed on an "AS IS" BASIS,
@@ -18,25 +18,38 @@
 
 package org.apache.hadoop.hdds.scm.node;
 
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.scm.server.SCMDatanodeHeartbeatDispatcher
 .NodeReportFromDatanode;
 import org.apache.hadoop.hdds.server.events.EventHandler;
 import org.apache.hadoop.hdds.server.events.EventPublisher;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 
 /**
  * Handles Node Reports from datanode.
  */
public class NodeReportHandler implements EventHandler<NodeReportFromDatanode> {
 
+  private static final Logger LOGGER = LoggerFactory
+  .getLogger(NodeReportHandler.class);
   private final NodeManager nodeManager;
 
   public NodeReportHandler(NodeManager nodeManager) {
+Preconditions.checkNotNull(nodeManager);
 this.nodeManager = nodeManager;
   }
 
   @Override
   public void onMessage(NodeReportFromDatanode nodeReportFromDatanode,
-EventPublisher publisher) {
-//TODO: process node report.
+  EventPublisher publisher) {
+Preconditions.checkNotNull(nodeReportFromDatanode);
+DatanodeDetails dn = nodeReportFromDatanode.getDatanodeDetails();
+Preconditions.checkNotNull(dn, "NodeReport is "
++ "missing DatanodeDetails.");
+LOGGER.trace("Processing node report for dn: {}", dn);
+nodeManager
+.processNodeReport(dn.getUuid(), 

hadoop git commit: YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via rkanter)

2018-07-12 Thread rkanter
Repository: hadoop
Updated Branches:
  refs/heads/trunk 556d9b36b -> 1bc106a73


YARN-8518. test-container-executor test_is_empty() is broken (Jim_Brennan via 
rkanter)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1bc106a7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1bc106a7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1bc106a7

Branch: refs/heads/trunk
Commit: 1bc106a738a6ce4f7ed025d556bb44c1ede022e3
Parents: 556d9b3
Author: Robert Kanter 
Authored: Thu Jul 12 16:38:46 2018 -0700
Committer: Robert Kanter 
Committed: Thu Jul 12 16:38:46 2018 -0700

--
 .../container-executor/test/test-container-executor.c | 14 +-
 1 file changed, 9 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1bc106a7/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
index a199d84..5607823 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/native/container-executor/test/test-container-executor.c
@@ -1203,19 +1203,23 @@ void test_trim_function() {
   free(trimmed);
 }
 
+int is_empty(char *name);
+
 void test_is_empty() {
   printf("\nTesting is_empty function\n");
   if (is_empty("/")) {
 printf("FAIL: / should not be empty\n");
 exit(1);
   }
-  if (is_empty("/tmp/2938rf2983hcqnw8ud/noexist")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/noexist should not exist\n");
+  char *noexist = TEST_ROOT "/noexist";
+  if (is_empty(noexist)) {
+printf("%s should not exist\n", noexist);
 exit(1);
   }
-  mkdir("/tmp/2938rf2983hcqnw8ud/emptydir", S_IRWXU);
-  if (!is_empty("/tmp/2938rf2983hcqnw8ud/emptydir")) {
-printf("FAIL: /tmp/2938rf2983hcqnw8ud/emptydir be empty\n");
+  char *emptydir = TEST_ROOT "/emptydir";
+  mkdir(emptydir, S_IRWXU);
+  if (!is_empty(emptydir)) {
+printf("FAIL: %s should be empty\n", emptydir);
 exit(1);
   }
 }


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDFS-13663. Should throw exception when incorrect block size is set. Contributed by Shweta.

2018-07-12 Thread xiao
Repository: hadoop
Updated Branches:
  refs/heads/trunk 1bc106a73 -> 87eeb26e7


HDFS-13663. Should throw exception when incorrect block size is set. 
Contributed by Shweta.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/87eeb26e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/87eeb26e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/87eeb26e

Branch: refs/heads/trunk
Commit: 87eeb26e7200fa3be0ca62ebf163985b58ad309e
Parents: 1bc106a
Author: Xiao Chen 
Authored: Thu Jul 12 20:19:14 2018 -0700
Committer: Xiao Chen 
Committed: Thu Jul 12 20:24:11 2018 -0700

--
 .../apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java  | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/87eeb26e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
index 94835e2..34f6c33 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockRecoveryWorker.java
@@ -275,7 +275,9 @@ public class BlockRecoveryWorker {
 }
 // recover() guarantees syncList will have at least one replica with 
RWR
 // or better state.
-assert minLength != Long.MAX_VALUE : "wrong minLength";
+if (minLength == Long.MAX_VALUE) {
+  throw new IOException("Incorrect block size");
+}
 newBlock.setNumBytes(minLength);
 break;
   case RUR:


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



hadoop git commit: HDDS-187. Command status publisher for datanode. Contributed by Ajay Kumar.

2018-07-12 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/trunk 87eeb26e7 -> f89e26590


HDDS-187. Command status publisher for datanode.
Contributed by Ajay Kumar.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f89e2659
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f89e2659
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f89e2659

Branch: refs/heads/trunk
Commit: f89e265905f39c8e51263a3946a8b8e6ab4ebad9
Parents: 87eeb26
Author: Anu Engineer 
Authored: Thu Jul 12 21:34:32 2018 -0700
Committer: Anu Engineer 
Committed: Thu Jul 12 21:35:12 2018 -0700

--
 .../org/apache/hadoop/hdds/HddsConfigKeys.java  |   8 +
 .../org/apache/hadoop/hdds/HddsIdFactory.java   |  53 ++
 .../common/src/main/resources/ozone-default.xml |   9 +
 .../apache/hadoop/utils/TestHddsIdFactory.java  |  77 +
 .../report/CommandStatusReportPublisher.java|  71 
 .../common/report/ReportPublisher.java  |   9 +
 .../common/report/ReportPublisherFactory.java   |   4 +
 .../statemachine/DatanodeStateMachine.java  |   2 +
 .../common/statemachine/StateContext.java   |  70 
 .../CloseContainerCommandHandler.java   |   5 +-
 .../commandhandler/CommandHandler.java  |  11 ++
 .../DeleteBlocksCommandHandler.java | 166 ++-
 .../ReplicateContainerCommandHandler.java   |   7 +-
 .../commands/CloseContainerCommand.java |  36 ++--
 .../ozone/protocol/commands/CommandStatus.java  | 141 
 .../protocol/commands/DeleteBlocksCommand.java  |  13 +-
 .../commands/ReplicateContainerCommand.java |  20 ++-
 .../protocol/commands/ReregisterCommand.java|  10 ++
 .../ozone/protocol/commands/SCMCommand.java |  19 +++
 .../StorageContainerDatanodeProtocol.proto  |  21 +++
 .../ozone/container/common/ScmTestMock.java |  33 +++-
 .../common/report/TestReportPublisher.java  |  75 -
 .../hadoop/hdds/scm/events/SCMEvents.java   |  57 ---
 .../server/SCMDatanodeHeartbeatDispatcher.java  |  23 ++-
 .../TestSCMDatanodeHeartbeatDispatcher.java |  25 ++-
 .../ozone/container/common/TestEndPoint.java| 111 -
 26 files changed, 935 insertions(+), 141 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f89e2659/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
index dec2c1c..8b449fb 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsConfigKeys.java
@@ -17,7 +17,15 @@
  */
 package org.apache.hadoop.hdds;
 
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+/**
+ * Config class for HDDS.
+ */
 public final class HddsConfigKeys {
   private HddsConfigKeys() {
   }
+  public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL =
+  "hdds.command.status.report.interval";
+  public static final String HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT =
+  ScmConfigKeys.OZONE_SCM_HEARBEAT_INTERVAL_DEFAULT;
 }
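A minimal sketch of how the new interval key might be consumed; conf is an
assumed Configuration instance and the String-default overload of
Configuration.getTimeDuration is assumed here (an illustration, not quoted
from the commit):

    // Illustration: resolve the report interval in milliseconds, falling
    // back to the heartbeat-interval default declared above.
    long intervalMs = conf.getTimeDuration(
        HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL,
        HddsConfigKeys.HDDS_COMMAND_STATUS_REPORT_INTERVAL_DEFAULT,
        TimeUnit.MILLISECONDS);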

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f89e2659/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
new file mode 100644
index 0000000..b244b8c
--- /dev/null
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsIdFactory.java
@@ -0,0 +1,53 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdds;
+
+import java.util.UUID;
+import java.util.concurrent.atomic.AtomicLong;
+
+/**
+ * HDDS Id generator.
+ */
+public final class 
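The class body is truncated above. A minimal sketch of what an
AtomicLong-backed id factory of this shape typically looks like, written from
the imports shown; the method names are illustrative, not the committed code:

    public final class HddsIdFactory {
      private HddsIdFactory() {
      }

      private static final AtomicLong LONG_COUNTER =
          new AtomicLong(System.currentTimeMillis());

      // Process-wide unique, monotonically increasing long id.
      public static long getLongId() {
        return LONG_COUNTER.incrementAndGet();
      }

      // Random UUID-based string id (hypothetical companion method).
      public static String getUuidId() {
        return UUID.randomUUID().toString();
      }
    }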

hadoop git commit: HDDS-238. Add Node2Pipeline Map in SCM to track ratis/standalone pipelines. Contributed by Mukul Kumar Singh.

2018-07-12 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk f89e26590 -> 3f3f72221


HDDS-238. Add Node2Pipeline Map in SCM to track ratis/standalone pipelines. 
Contributed by Mukul Kumar Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3f3f7222
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3f3f7222
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3f3f7222

Branch: refs/heads/trunk
Commit: 3f3f72221ffd11cc6bfa0e010e3c5b0e14911102
Parents: f89e265
Author: Xiaoyu Yao 
Authored: Thu Jul 12 22:02:57 2018 -0700
Committer: Xiaoyu Yao 
Committed: Thu Jul 12 22:14:03 2018 -0700

--
 .../container/common/helpers/ContainerInfo.java |  11 ++
 .../hdds/scm/container/ContainerMapping.java|  11 +-
 .../scm/container/ContainerStateManager.java|   6 +
 .../scm/container/states/ContainerStateMap.java |  36 +-
 .../hdds/scm/pipelines/Node2PipelineMap.java| 121 +++
 .../hdds/scm/pipelines/PipelineManager.java |  22 ++--
 .../hdds/scm/pipelines/PipelineSelector.java|  24 +++-
 .../scm/pipelines/ratis/RatisManagerImpl.java   |  11 +-
 .../standalone/StandaloneManagerImpl.java   |   7 +-
 .../hdds/scm/pipeline/TestNode2PipelineMap.java | 117 ++
 10 files changed, 343 insertions(+), 23 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f3f7222/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
--
diff --git 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index 9593717..4074b21 100644
--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -456,4 +456,15 @@ public class ContainerInfo implements Comparator<ContainerInfo>,
   replicationFactor, replicationType);
 }
   }
+
+  /**
+   * Check if a container is in the open state; this covers containers that
+   * are open, allocated, or creating. Any container in these states is
+   * managed as an open container by SCM.
+   */
+  public boolean isContainerOpen() {
+return state == HddsProtos.LifeCycleState.ALLOCATED ||
+state == HddsProtos.LifeCycleState.CREATING ||
+state == HddsProtos.LifeCycleState.OPEN;
+  }
 }
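A one-line illustration of the call-site simplification the helper above
enables; info is an assumed ContainerInfo instance:

    // Instead of comparing against ALLOCATED, CREATING and OPEN at every
    // call site, callers can now ask the container directly.
    if (info.isContainerOpen()) {
      // SCM manages the container as an open container.
    }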

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f3f7222/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
--
diff --git 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
index abad32c..26f4d86 100644
--- 
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
+++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java
@@ -477,7 +477,7 @@ public class ContainerMapping implements Mapping {
 List
 containerInfos = reports.getReportsList();
 
- for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
+for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState :
 containerInfos) {
   byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID());
   lock.lock();
@@ -498,7 +498,9 @@ public class ContainerMapping implements Mapping {
   containerStore.put(dbKey, newState.toByteArray());
 
   // If the container is closed, then state is already written to SCM
-  Pipeline pipeline = 
pipelineSelector.getPipeline(newState.getPipelineName(), 
newState.getReplicationType());
+  Pipeline pipeline =
+  pipelineSelector.getPipeline(newState.getPipelineName(),
+  newState.getReplicationType());
   if(pipeline == null) {
 pipeline = pipelineSelector
 .getReplicationPipeline(newState.getReplicationType(),
@@ -713,4 +715,9 @@ public class ContainerMapping implements Mapping {
   public MetadataStore getContainerStore() {
 return containerStore;
   }
+
+  @VisibleForTesting
+  public PipelineSelector getPipelineSelector() {
+return pipelineSelector;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3f3f7222/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java
--
diff 

[11/50] [abbrv] hadoop git commit: HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G

2018-07-12 Thread rakeshr
HDFS-11874. [SPS]: Document the SPS feature. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac0e71c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac0e71c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac0e71c3

Branch: refs/heads/HDFS-10285
Commit: ac0e71c3ce38a45023eb5d85cdbe0f22c8e7739f
Parents: b43972a
Author: Rakesh Radhakrishnan 
Authored: Fri Jul 14 22:36:09 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:01:37 2018 +0530

--
 .../src/site/markdown/ArchivalStorage.md| 51 ++--
 1 file changed, 48 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac0e71c3/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
index a56cf8b..9098616 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/ArchivalStorage.md
@@ -97,8 +97,44 @@ The effective storage policy can be retrieved by the 
"[`storagepolicies -getStor
 
 The default storage type of a datanode storage location will be DISK if it 
does not have a storage type tagged explicitly.
 
-Mover - A New Data Migration Tool
--
+Storage Policy Based Data Movement
+--
+
+Setting a new storage policy on an already existing file/dir will change the
policy in the Namespace, but it will not move the blocks physically across
storage media.
+The following two options allow users to move the blocks based on the newly
set policy. So, once a user changes/sets a new policy on a file/directory, the
user should also perform one of the following options to achieve the desired
data movement. Note that the two options cannot be run simultaneously.
+
+### Storage Policy Satisfier (SPS)
+
+When a user changes the storage policy on a file/directory, the user can call
the `HdfsAdmin` API `satisfyStoragePolicy()` to move the blocks as per the
newly set policy.
+The SPS daemon thread runs along with the namenode and periodically scans for
storage mismatches between the newly set policy and the physical placement of
blocks. It only tracks the files/directories for which the user invoked
satisfyStoragePolicy. If SPS identifies some blocks to be moved for a file,
it schedules block movement tasks to datanodes. A Coordinator DataNode (C-DN)
tracks all block movements associated with a file and notifies the namenode
about movement success/failure. If there are any failures in movement, the SPS
re-attempts by sending a new block movement task.
+
+SPS can be activated and deactivated dynamically without restarting the 
Namenode.
+
+Detailed design documentation can be found at [Storage Policy Satisfier(SPS) 
(HDFS-10285)](https://issues.apache.org/jira/browse/HDFS-10285)
+
+* **Note**: When a user invokes the `satisfyStoragePolicy()` API on a
directory, SPS considers only the files immediately under that directory.
Sub-directories won't be considered for satisfying the policy. It is the
user's responsibility to call this API on sub-directories recursively, to
track all files under the sub tree.
+
+* HdfsAdmin API :
+`public void satisfyStoragePolicy(final Path path) throws IOException`
+
+* Arguments :
+
+| | |
+|:---- |:---- |
+| `path` | A path which requires blocks storage movement. |
+
+Configurations:
+
+*   **dfs.storage.policy.satisfier.activate** - Used to activate or deactivate
SPS. Setting it to true activates SPS
+   and setting it to false deactivates it.
+
+*   **dfs.storage.policy.satisfier.recheck.timeout.millis** - A timeout to 
re-check the processed block storage movement
+   command results from Co-ordinator Datanode.
+
+*   **dfs.storage.policy.satisfier.self.retry.timeout.millis** - A timeout to 
retry if no block movement results reported from
+   Co-ordinator Datanode in this configured timeout.
+
+### Mover - A New Data Migration Tool
 
 A new data migration tool is added for archiving data. The tool is similar to 
Balancer. It periodically scans the files in HDFS to check if the block 
placement satisfies the storage policy. For the blocks violating the storage 
policy, it moves the replicas to a different storage type in order to fulfill 
the storage policy requirement. Note that it always tries to move block 
replicas within the same node whenever possible. If that is not possible (e.g. 
when a node doesn’t have the target storage type) then it will copy the block 
replicas to another node over the network.
 
@@ -115,6 +151,10 @@ A new data migration tool 
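To make the satisfyStoragePolicy() call described in this commit concrete, a
minimal client-side sketch; the namenode URI and path are examples, while
HdfsAdmin and the method signature are as documented in the diff above:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.client.HdfsAdmin;

    public class SatisfyPolicyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Assumes the policy on /cold/data was already changed, e.g. via
        // `hdfs storagepolicies -setStoragePolicy -path /cold/data -policy COLD`.
        HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://namenode:8020"), conf);
        // SPS will schedule block movements for files directly under the path.
        admin.satisfyStoragePolicy(new Path("/cold/data"));
      }
    }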

[36/50] [abbrv] hadoop git commit: HDFS-13057: [SPS]: Revisit configurations to make SPS service modes internal/external/none. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/547537ec/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
index 0e3a5a3..2257608 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Assert;
@@ -103,8 +104,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -216,8 +217,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -328,8 +329,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 conf.set(DFSConfigKeys
 .DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY,
 "3000");
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)
@@ -420,8 +421,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
 }
 
 final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
+conf.set(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
+StoragePolicySatisfierMode.INTERNAL.toString());
 initConfWithStripe(conf, defaultStripeBlockSize);
 final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
 .numDataNodes(numOfDatanodes)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547537ec/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
index 9a401bd..42b04da 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/sps/TestExternalStoragePolicySatisfier.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfierMode;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.namenode.sps.BlockMovementListener;
@@ -54,12 +55,19 @@ public class TestExternalStoragePolicySatisfier
   new StorageType[][]{{StorageType.DISK, StorageType.DISK},
   {StorageType.DISK, StorageType.DISK},
   {StorageType.DISK, StorageType.DISK}};

[45/50] [abbrv] hadoop git commit: HDFS-13166: [SPS]: Implement caching mechanism to keep LIVE datanodes to minimize costly getLiveDatanodeStorageReport() calls. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-13166: [SPS]: Implement caching mechanism to keep LIVE datanodes to 
minimize costly getLiveDatanodeStorageReport() calls. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dc52bd4b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dc52bd4b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dc52bd4b

Branch: refs/heads/HDFS-10285
Commit: dc52bd4bdfde6d5153bd30b34f60ae82bfb6379e
Parents: 538b9bd
Author: Surendra Singh Lilhore 
Authored: Thu Mar 1 00:08:37 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:04:39 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   5 +
 .../NamenodeProtocolServerSideTranslatorPB.java |  19 --
 .../NamenodeProtocolTranslatorPB.java   |  17 -
 .../hdfs/server/namenode/NameNodeRpcServer.java |  13 -
 .../hdfs/server/namenode/sps/Context.java   |  24 +-
 .../namenode/sps/DatanodeCacheManager.java  | 121 +++
 .../namenode/sps/IntraSPSNameNodeContext.java   |  23 +-
 .../namenode/sps/StoragePolicySatisfier.java| 340 ++-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  16 -
 .../hdfs/server/sps/ExternalSPSContext.java |  32 +-
 .../src/main/proto/NamenodeProtocol.proto   |  25 --
 .../src/main/resources/hdfs-default.xml |  11 +
 .../src/site/markdown/ArchivalStorage.md|   2 +-
 .../TestStoragePolicySatisfyWorker.java |   3 +
 .../TestPersistentStoragePolicySatisfier.java   |   6 +
 .../TestStoragePolicySatisfierWithHA.java   |   3 +
 .../sps/TestStoragePolicySatisfier.java |   4 +
 ...stStoragePolicySatisfierWithStripedFile.java |  24 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |   3 +
 19 files changed, 431 insertions(+), 260 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc52bd4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c79bf60..843d9d8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -640,6 +640,11 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_SPS_MAX_OUTSTANDING_PATHS_KEY =
   "dfs.storage.policy.satisfier.max.outstanding.paths";
   public static final int DFS_SPS_MAX_OUTSTANDING_PATHS_DEFAULT = 1;
+  // SPS datanode cache config, defaulting to 5mins.
+  public static final String DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS =
+  "dfs.storage.policy.satisfier.datanode.cache.refresh.interval.ms";
+  public static final long DFS_SPS_DATANODE_CACHE_REFRESH_INTERVAL_MS_DEFAULT =
+  300000L;
 
   // SPS keytab configurations, by default it is disabled.
   public static final String  DFS_SPS_ADDRESS_KEY =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dc52bd4b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index ed176cc..e4283c6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -23,8 +23,6 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -277,21 +275,4 @@ public class NamenodeProtocolServerSideTranslatorPB 

[25/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9062df87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
new file mode 100644
index 0000000..c1a2b8b
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfierWithStripedFile.java
@@ -0,0 +1,580 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Test;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that StoragePolicySatisfier daemon is able to check the striped blocks
+ * to be moved and finding its expected target locations in order to satisfy 
the
+ * storage policy.
+ */
+public class TestStoragePolicySatisfierWithStripedFile {
+
+  private static final Logger LOG = LoggerFactory
+  .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
+
+  private final int stripesPerBlock = 2;
+
+  private ErasureCodingPolicy ecPolicy;
+  private int dataBlocks;
+  private int parityBlocks;
+  private int cellSize;
+  private int defaultStripeBlockSize;
+
+  private ErasureCodingPolicy getEcPolicy() {
+return StripedFileTestUtil.getDefaultECPolicy();
+  }
+
+  /**
+   * Initialize erasure coding policy.
+   */
+  @Before
+  public void init(){
+ecPolicy = getEcPolicy();
+dataBlocks = ecPolicy.getNumDataUnits();
+parityBlocks = ecPolicy.getNumParityUnits();
+cellSize = ecPolicy.getCellSize();
+defaultStripeBlockSize = cellSize * stripesPerBlock;
+  }
+
+  /**
+   * Tests to verify that all the striped blocks (data + parity blocks) are
+   * moved to satisfy the storage policy.
+   */
+  @Test(timeout = 30)
+  public void testMoverWithFullStripe() throws Exception {
+// start 10 datanodes
+int numOfDatanodes = 10;
+int storagesPerDatanode = 2;
+long capacity = 20 * defaultStripeBlockSize;
+long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
+for (int i = 0; i < numOfDatanodes; i++) {
+  for (int j = 0; j < storagesPerDatanode; j++) {
+capacities[i][j] = capacity;
+  }
+}
+
+final Configuration conf = new HdfsConfiguration();
+conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
+true);
+initConfWithStripe(conf, defaultStripeBlockSize);
+final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+.numDataNodes(numOfDatanodes)
+

[01/50] [abbrv] hadoop git commit: HDFS-11334: [SPS]: NN switch and rescheduling movements can lead to have more than one coordinator for same file blocks. Contributed by Rakesh R. [Forced Update!]

2018-07-12 Thread rakeshr
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-10285 d35255d43 -> c6a1e5ab4 (forced update)


HDFS-11334: [SPS]: NN switch and rescheduling movements can lead to have more 
than one coordinator for same file blocks. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d37ae23e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d37ae23e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d37ae23e

Branch: refs/heads/HDFS-10285
Commit: d37ae23effad2c4d98f926e8da4a36eb05f2d4cc
Parents: 3bdcb4c
Author: Uma Maheswara Rao G 
Authored: Tue Apr 18 15:23:58 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:00:17 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   2 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |   6 +
 .../server/blockmanagement/DatanodeManager.java |  12 ++
 .../hdfs/server/datanode/BPServiceActor.java|   4 +-
 .../datanode/BlockStorageMovementTracker.java   |  37 +++-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  12 +-
 .../datanode/StoragePolicySatisfyWorker.java|  95 +--
 .../BlockStorageMovementAttemptedItems.java |  80 ++---
 .../server/namenode/StoragePolicySatisfier.java |  15 +-
 .../protocol/BlocksStorageMovementResult.java   |   6 +-
 .../src/main/proto/DatanodeProtocol.proto   |   1 +
 .../TestStoragePolicySatisfyWorker.java |  68 
 .../TestStoragePolicySatisfierWithHA.java   | 170 +--
 13 files changed, 413 insertions(+), 95 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d37ae23e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 00152cc..b5341a2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -619,7 +619,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String 
DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.self.retry.timeout.millis";
   public static final int 
DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT =
-  30 * 60 * 1000;
+  20 * 60 * 1000;
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = 
"dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d37ae23e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
index 0c03608..996b986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelper.java
@@ -985,6 +985,9 @@ public class PBHelper {
   case FAILURE:
 status = Status.FAILURE;
 break;
+  case IN_PROGRESS:
+status = Status.IN_PROGRESS;
+break;
   default:
 throw new AssertionError("Unknown status: " + resultProto.getStatus());
   }
@@ -1011,6 +1014,9 @@ public class PBHelper {
   case FAILURE:
 status = BlocksStorageMovementResultProto.Status.FAILURE;
 break;
+  case IN_PROGRESS:
+status = BlocksStorageMovementResultProto.Status.IN_PROGRESS;
+break;
   default:
 throw new AssertionError("Unknown status: " + report.getStatus());
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/d37ae23e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
index da340a8..2d7c80e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
@@ 

[32/50] [abbrv] hadoop git commit: HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for external/internal implementations. Contributed by Uma Maheswara Rao G

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2e7fbd4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
index 2a7bde5..9354044 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -72,7 +72,6 @@ import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.Mockito;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -147,12 +146,11 @@ public class TestStoragePolicySatisfier {
 startAdditionalDNs(config, 3, numOfDatanodes, newtypes,
 storagesPerDatanode, capacity, hdfsCluster);
 
-dfs.satisfyStoragePolicy(new Path(file));
-
 hdfsCluster.triggerHeartbeats();
+dfs.satisfyStoragePolicy(new Path(file));
 // Wait till namenode notified about the block location details
-DFSTestUtil.waitExpectedStorageType(
-file, StorageType.ARCHIVE, 3, 3, dfs);
+DFSTestUtil.waitExpectedStorageType(file, StorageType.ARCHIVE, 3, 35000,
+dfs);
   }
 
   @Test(timeout = 30)
@@ -1284,6 +1282,7 @@ public class TestStoragePolicySatisfier {
 {StorageType.ARCHIVE, StorageType.SSD},
 {StorageType.DISK, StorageType.DISK}};
 config.setLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+config.setInt(DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 10);
 hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
 storagesPerDatanode, capacity);
 dfs = hdfsCluster.getFileSystem();
@@ -1299,19 +1298,28 @@ public class TestStoragePolicySatisfier {
 
 //Queue limit can control the traverse logic to wait for some free
 //entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier sps = Mockito.mock(StoragePolicySatisfier.class);
-Mockito.when(sps.isRunning()).thenReturn(true);
-Context ctxt = Mockito.mock(Context.class);
-config.setInt(DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 10);
-Mockito.when(ctxt.getConf()).thenReturn(config);
-Mockito.when(ctxt.isRunning()).thenReturn(true);
-Mockito.when(ctxt.isInSafeMode()).thenReturn(false);
-Mockito.when(ctxt.isFileExist(Mockito.anyLong())).thenReturn(true);
-BlockStorageMovementNeeded movmentNeededQueue =
-new BlockStorageMovementNeeded(ctxt);
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(hdfsCluster.getNamesystem(),
+hdfsCluster.getNamesystem().getBlockManager(), sps) {
+  @Override
+  public boolean isInSafeMode() {
+return false;
+  }
+
+  @Override
+  public boolean isRunning() {
+return true;
+  }
+};
+
+FileIdCollector fileIDCollector =
+new IntraSPSNameNodeFileIdCollector(fsDir, sps);
+sps.init(ctxt, fileIDCollector, null);
+sps.getStorageMovementQueue().activate();
+
 INode rootINode = fsDir.getINode("/root");
-movmentNeededQueue.addToPendingDirQueue(rootINode.getId());
-movmentNeededQueue.init(fsDir);
+hdfsCluster.getNamesystem().getBlockManager()
+.addSPSPathId(rootINode.getId());
 
 //Wait for thread to reach U.
 Thread.sleep(1000);
@@ -1321,7 +1329,7 @@ public class TestStoragePolicySatisfier {
 // Remove 10 element and make queue free, So other traversing will start.
 for (int i = 0; i < 10; i++) {
   String path = expectedTraverseOrder.remove(0);
-  long trackId = movmentNeededQueue.get().getTrackId();
+  long trackId = sps.getStorageMovementQueue().get().getFileId();
   INode inode = fsDir.getInode(trackId);
   assertTrue("Failed to traverse tree, expected " + path + " but got "
   + inode.getFullPathName(), path.equals(inode.getFullPathName()));
@@ -1332,7 +1340,7 @@ public class TestStoragePolicySatisfier {
 // Check other element traversed in order and R,S should not be added in
 // queue which we already removed from expected list
 for (String path : expectedTraverseOrder) {
-  long trackId = movmentNeededQueue.get().getTrackId();
+  long trackId = sps.getStorageMovementQueue().get().getFileId();
   INode inode = fsDir.getInode(trackId);
   assertTrue("Failed to traverse tree, expected " + path + " but got "
   + inode.getFullPathName(), path.equals(inode.getFullPathName()));
@@ -1352,6 +1360,7 @@ public class 

[35/50] [abbrv] hadoop git commit: HDFS-13033: [SPS]: Implement a mechanism to do file block movements for external SPS. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-13033: [SPS]: Implement a mechanism to do file block movements for 
external SPS. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b527527
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b527527
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b527527

Branch: refs/heads/HDFS-10285
Commit: 4b527527a79f9c49e706cf06cff962a22d0332d5
Parents: 3797331
Author: Uma Maheswara Rao G 
Authored: Tue Jan 23 16:19:46 2018 -0800
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:03:43 2018 +0530

--
 .../hdfs/server/balancer/NameNodeConnector.java |   8 +
 .../hdfs/server/common/sps/BlockDispatcher.java | 186 +
 .../sps/BlockMovementAttemptFinished.java   |  80 ++
 .../server/common/sps/BlockMovementStatus.java  |  53 
 .../common/sps/BlockStorageMovementTracker.java | 184 +
 .../sps/BlocksMovementsStatusHandler.java   |  95 +++
 .../hdfs/server/common/sps/package-info.java|  27 ++
 .../datanode/BlockStorageMovementTracker.java   | 186 -
 .../datanode/StoragePolicySatisfyWorker.java| 271 ++-
 .../hdfs/server/namenode/FSNamesystem.java  |   4 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |   3 +-
 .../sps/BlockStorageMovementAttemptedItems.java |  12 +-
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   3 +-
 .../hdfs/server/namenode/sps/SPSService.java|  14 +-
 .../namenode/sps/StoragePolicySatisfier.java|  30 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java| 233 
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../sps/TestStoragePolicySatisfier.java |   6 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  69 -
 19 files changed, 997 insertions(+), 469 deletions(-)
--
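
The moved classes in the summary above (BlockDispatcher, BlockStorageMovementTracker, BlocksMovementsStatusHandler) relocate the block-movement machinery into a common package so an external satisfier process can reuse it. The core of that machinery is a daemon thread draining a CompletionService of asynchronous move tasks. Below is a minimal, self-contained Java sketch of that pattern only; MovementTracker and the String results are hypothetical stand-ins, not the Hadoop classes.

import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

/** Hypothetical sketch: drain async block-move results on a daemon thread. */
public class MovementTracker implements Runnable {
  private final CompletionService<String> completionService;
  private volatile boolean running = true;

  public MovementTracker(CompletionService<String> cs) {
    this.completionService = cs;
  }

  public void stopTracking() {
    running = false;
  }

  @Override
  public void run() {
    while (running) {
      try {
        // Blocks until some previously submitted move task completes.
        Future<String> done = completionService.take();
        String result = done.get();
        if (running) {
          System.out.println("move attempt finished: " + result);
        }
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore flag and exit
        return;
      } catch (ExecutionException e) {
        System.err.println("move failed: " + e.getCause());
      }
    }
  }

  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    CompletionService<String> cs = new ExecutorCompletionService<>(pool);
    MovementTracker tracker = new MovementTracker(cs);
    Thread t = new Thread(tracker, "BlockStorageMovementTracker");
    t.setDaemon(true);
    t.start();
    cs.submit(() -> "blk_1 -> ARCHIVE"); // stand-in for a block-moving task
    Thread.sleep(500);
    tracker.stopTracking();
    t.interrupt();
    pool.shutdownNow();
  }
}

A CompletionService fits here because the tracker wants results in completion order, not submission order.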


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b527527/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
index b0dd779..6bfbbb3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
@@ -269,6 +269,14 @@ public class NameNodeConnector implements Closeable {
 }
   }
 
+  /**
+   * Returns fallbackToSimpleAuth. This will be true or false during calls to
+   * indicate if a secure client falls back to simple auth.
+   */
+  public AtomicBoolean getFallbackToSimpleAuth() {
+return fallbackToSimpleAuth;
+  }
+
   @Override
   public void close() {
 keyManager.close();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b527527/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
new file mode 100644
index 0000000..f87fcae
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/sps/BlockDispatcher.java
@@ -0,0 +1,186 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.common.sps;
+
+import static org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed;
+
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+import 

[46/50] [abbrv] hadoop git commit: HDFS-13165: [SPS]: Collects successfully moved block details via IBR. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/02ebf5d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
index 7580ba9..f5225d2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSBlockMoveTaskHandler.java
@@ -20,13 +20,10 @@ package org.apache.hadoop.hdfs.server.sps;
 
 import java.io.IOException;
 import java.net.Socket;
-import java.util.ArrayList;
-import java.util.List;
 import java.util.concurrent.Callable;
 import java.util.concurrent.CompletionService;
 import java.util.concurrent.ExecutorCompletionService;
 import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Future;
 import java.util.concurrent.SynchronousQueue;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -39,7 +36,6 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.datatransfer.TrustedChannelResolver;
@@ -48,15 +44,14 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.sasl.SaslDataTransferClient;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.balancer.KeyManager;
 import org.apache.hadoop.hdfs.server.balancer.NameNodeConnector;
+import org.apache.hadoop.hdfs.server.common.sps.BlockDispatcher;
 import org.apache.hadoop.hdfs.server.common.sps.BlockMovementAttemptFinished;
 import org.apache.hadoop.hdfs.server.common.sps.BlockMovementStatus;
 import org.apache.hadoop.hdfs.server.common.sps.BlockStorageMovementTracker;
 import org.apache.hadoop.hdfs.server.common.sps.BlocksMovementsStatusHandler;
-import org.apache.hadoop.hdfs.server.common.sps.BlockDispatcher;
 import org.apache.hadoop.hdfs.server.namenode.sps.BlockMoveTaskHandler;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.Daemon;
 import org.slf4j.Logger;
@@ -105,12 +100,14 @@ public class ExternalSPSBlockMoveTaskHandler implements BlockMoveTaskHandler {
 int ioFileBufferSize = DFSUtilClient.getIoFileBufferSize(conf);
 blkDispatcher = new BlockDispatcher(HdfsConstants.READ_TIMEOUT,
 ioFileBufferSize, connectToDnViaHostname);
+
+startMovementTracker();
   }
 
   /**
* Initializes block movement tracker daemon and starts the thread.
*/
-  public void init() {
+  private void startMovementTracker() {
 movementTrackerThread = new Daemon(this.blkMovementTracker);
 movementTrackerThread.setName("BlockStorageMovementTracker");
 movementTrackerThread.start();
@@ -156,24 +153,16 @@ public class ExternalSPSBlockMoveTaskHandler implements BlockMoveTaskHandler {
 // dn.incrementBlocksScheduled(blkMovingInfo.getTargetStorageType());
 LOG.debug("Received BlockMovingTask {}", blkMovingInfo);
 BlockMovingTask blockMovingTask = new BlockMovingTask(blkMovingInfo);
-Future moveCallable = mCompletionServ
-.submit(blockMovingTask);
-blkMovementTracker.addBlock(blkMovingInfo.getBlock(), moveCallable);
+mCompletionServ.submit(blockMovingTask);
   }
 
   private class ExternalBlocksMovementsStatusHandler
-  extends BlocksMovementsStatusHandler {
+  implements BlocksMovementsStatusHandler {
 @Override
-public void handle(
-List moveAttemptFinishedBlks) {
-  List blocks = new ArrayList<>();
-  for (BlockMovementAttemptFinished item : moveAttemptFinishedBlks) {
-blocks.add(item.getBlock());
-  }
-  BlocksStorageMoveAttemptFinished blkAttempted =
-  new BlocksStorageMoveAttemptFinished(
-  blocks.toArray(new Block[blocks.size()]));
-  service.notifyStorageMovementAttemptFinishedBlks(blkAttempted);
+public void handle(BlockMovementAttemptFinished attemptedMove) {
+  service.notifyStorageMovementAttemptFinishedBlk(
+  attemptedMove.getTargetDatanode(), attemptedMove.getTargetType(),
+  attemptedMove.getBlock());
 }
   }
 
@@ -194,6 +183,7 @@ public class 
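
The handler hunk above changes the callback shape: BlocksMovementsStatusHandler becomes an interface, and instead of batching a list of finished attempts into one report, the external handler forwards each finished attempt (target datanode, target storage type, block) to the service individually, matching the per-block IBR-based reporting this commit introduces. A rough, self-contained sketch of that callback shape, with all names hypothetical:

/** Hypothetical sketch of a per-attempt movement status callback. */
interface MovementStatusHandler {
  void handle(String block, String targetNode, String targetType);
}

class PrintingHandler implements MovementStatusHandler {
  @Override
  public void handle(String block, String targetNode, String targetType) {
    // One finished attempt is forwarded immediately; no batching needed.
    System.out.printf("attempt finished: %s -> %s (%s)%n",
        block, targetNode, targetType);
  }
}

public class HandlerDemo {
  public static void main(String[] args) {
    MovementStatusHandler h = new PrintingHandler();
    h.handle("blk_1", "dn-1:9866", "ARCHIVE");
  }
}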

[49/50] [abbrv] hadoop git commit: HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier file path. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier 
file path. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/24bcc8c3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/24bcc8c3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/24bcc8c3

Branch: refs/heads/HDFS-10285
Commit: 24bcc8c3894622c692233bb3704be6a452168577
Parents: 02ebf5d
Author: Uma Maheswara Rao G 
Authored: Mon Jul 2 17:22:00 2018 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:04:55 2018 +0530

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  2 +-
 .../NamenodeProtocolTranslatorPB.java   |  2 +-
 .../server/blockmanagement/BlockManager.java|  2 +-
 .../hdfs/server/namenode/FSNamesystem.java  | 11 ---
 .../hdfs/server/namenode/NameNodeRpcServer.java |  8 +-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  9 ---
 .../sps/BlockStorageMovementAttemptedItems.java | 72 +++--
 .../sps/BlockStorageMovementNeeded.java | 61 ++
 .../hdfs/server/namenode/sps/Context.java   | 45 ---
 .../namenode/sps/DatanodeCacheManager.java  |  4 +-
 .../hdfs/server/namenode/sps/FileCollector.java | 13 +--
 .../namenode/sps/IntraSPSNameNodeContext.java   | 54 +
 .../sps/IntraSPSNameNodeFileIdCollector.java| 14 ++--
 .../hdfs/server/namenode/sps/ItemInfo.java  | 34 
 .../hdfs/server/namenode/sps/SPSService.java| 31 +++
 .../namenode/sps/StoragePolicySatisfier.java| 61 +-
 .../sps/StoragePolicySatisfyManager.java| 20 ++---
 .../hdfs/server/protocol/NamenodeProtocol.java  |  2 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|  4 +-
 .../hdfs/server/sps/ExternalSPSContext.java | 85 
 .../sps/ExternalSPSFilePathCollector.java   | 36 +
 .../sps/ExternalStoragePolicySatisfier.java | 30 +--
 .../src/main/proto/NamenodeProtocol.proto   |  2 +-
 .../TestBlockStorageMovementAttemptedItems.java | 16 ++--
 .../sps/TestStoragePolicySatisfier.java | 66 +--
 ...stStoragePolicySatisfierWithStripedFile.java | 41 --
 .../sps/TestExternalStoragePolicySatisfier.java | 35 +++-
 27 files changed, 346 insertions(+), 414 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/24bcc8c3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index e4283c6..d9367fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -265,7 +265,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements
   RpcController controller, GetNextSPSPathRequestProto request)
   throws ServiceException {
 try {
-  String nextSPSPath = impl.getNextSPSPath();
+  Long nextSPSPath = impl.getNextSPSPath();
   if (nextSPSPath == null) {
 return GetNextSPSPathResponseProto.newBuilder().build();
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24bcc8c3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
index 97dee9b..3bd5986 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
@@ -267,7 +267,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
   }
 
   @Override
-  public String getNextSPSPath() throws IOException {
+  public Long getNextSPSPath() throws IOException {
 GetNextSPSPathRequestProto req =
 GetNextSPSPathRequestProto.newBuilder().build();
 try {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/24bcc8c3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

[23/50] [abbrv] hadoop git commit: HDFS-12790: [SPS]: Rebasing HDFS-10285 branch after HDFS-10467, HDFS-12599 and HDFS-11968 commits. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-12790: [SPS]: Rebasing HDFS-10285 branch after HDFS-10467, HDFS-12599 and 
HDFS-11968 commits. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c301e365
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c301e365
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c301e365

Branch: refs/heads/HDFS-10285
Commit: c301e3659da5a65b7f11c95ec1a9e9c7e0a26517
Parents: e4c9423
Author: Rakesh Radhakrishnan 
Authored: Fri Nov 10 10:06:43 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:02:55 2018 +0530

--
 .../federation/router/RouterRpcServer.java  |  19 +++
 .../namenode/TestStoragePolicySatisfier.java|   9 +-
 ...stStoragePolicySatisfierWithStripedFile.java |  21 +--
 .../hdfs/tools/TestStoragePolicyCommands.java   |  57 -
 .../TestStoragePolicySatisfyAdminCommands.java  | 127 +++
 5 files changed, 162 insertions(+), 71 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c301e365/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index 7031af7..e80942f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -87,6 +87,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.ReencryptAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.RollingUpgradeAction;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.HdfsLocatedFileStatus;
 import org.apache.hadoop.hdfs.protocol.LastBlockWithStatus;
@@ -2512,4 +2513,22 @@ public class RouterRpcServer extends AbstractService
   public FederationRPCMetrics getRPCMetrics() {
 return this.rpcMonitor.getRPCMetrics();
   }
+
+  @Override
+  public void satisfyStoragePolicy(String path) throws IOException {
+checkOperation(OperationCategory.WRITE, false);
+  }
+
+  @Override
+  public boolean isStoragePolicySatisfierRunning() throws IOException {
+checkOperation(OperationCategory.READ, false);
+return false;
+  }
+
+  @Override
+  public StoragePolicySatisfyPathStatus checkStoragePolicySatisfyPathStatus(
+  String path) throws IOException {
+checkOperation(OperationCategory.READ, false);
+return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c301e365/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index f42d911..edd1aca 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -61,6 +61,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
 import org.junit.Assert;
@@ -912,8 +913,6 @@ public class TestStoragePolicySatisfier {
 
 int defaultStripedBlockSize =
 StripedFileTestUtil.getDefaultECPolicy().getCellSize() * 4;
-config.set(DFSConfigKeys.DFS_NAMENODE_EC_POLICIES_ENABLED_KEY,
-StripedFileTestUtil.getDefaultECPolicy().getName());
 config.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, defaultStripedBlockSize);
 config.setLong(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1L);
 

[14/50] [abbrv] hadoop git commit: HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to improve thread cleanup time. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-12152: [SPS]: Re-arrange StoragePolicySatisfyWorker stopping sequence to 
improve thread cleanup time. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f2e76f78
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f2e76f78
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f2e76f78

Branch: refs/heads/HDFS-10285
Commit: f2e76f781ef92fc59253e3c1509bd5d701fdb46e
Parents: 2eb29c2
Author: Uma Maheswara Rao G 
Authored: Wed Jul 19 00:55:26 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:02:00 2018 +0530

--
 .../datanode/BlockStorageMovementTracker.java   | 16 
 .../server/datanode/StoragePolicySatisfyWorker.java |  5 +++--
 2 files changed, 15 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2e76f78/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index c7e952b..f3d2bb6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -77,7 +77,8 @@ public class BlockStorageMovementTracker implements Runnable {
 moverTaskFutures.wait(2000);
   }
 } catch (InterruptedException ignore) {
-  // ignore
+  // Sets interrupt flag of this thread.
+  Thread.currentThread().interrupt();
 }
   }
   try {
@@ -102,12 +103,19 @@ public class BlockStorageMovementTracker implements Runnable {
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }
-// handle completed or inprogress blocks movements per trackId.
-blksMovementsStatusHandler.handle(resultPerTrackIdList);
+if (running) {
+  // handle completed or inprogress blocks movements per trackId.
+  blksMovementsStatusHandler.handle(resultPerTrackIdList);
+}
 movementResults.remove(trackId);
   }
 }
-  } catch (ExecutionException | InterruptedException e) {
+  } catch (InterruptedException e) {
+if (running) {
+  LOG.error("Exception while moving block replica to target storage"
+  + " type", e);
+}
+  } catch (ExecutionException e) {
 // TODO: Do we need failure retries and implement the same if required.
 LOG.error("Exception while moving block replica to target storage type",
 e);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f2e76f78/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index 196cd58..4e57805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -137,8 +137,8 @@ public class StoragePolicySatisfyWorker {
* thread.
*/
   void stop() {
-movementTrackerThread.interrupt();
 movementTracker.stopTracking();
+movementTrackerThread.interrupt();
   }
 
   /**
@@ -147,7 +147,8 @@ public class StoragePolicySatisfyWorker {
   void waitToFinishWorkerThread() {
 try {
   movementTrackerThread.join(3000);
-} catch (InterruptedException ie) {
+} catch (InterruptedException ignore) {
+  // ignore
 }
   }
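
Both hunks above concern shutdown ordering: stop() now clears the tracker's running flag via stopTracking() before interrupting the thread, so when the InterruptedException lands the loop can distinguish a requested stop from an unexpected interrupt, and the interrupt handler restores the thread's interrupt flag instead of swallowing it. A minimal sketch of that ordering, assuming hypothetical names:

/** Hypothetical sketch of the stop-before-interrupt shutdown ordering. */
public class StopOrderDemo {
  static class Tracker implements Runnable {
    volatile boolean running = true;

    void stopTracking() {
      running = false;
    }

    @Override
    public void run() {
      while (running) {
        try {
          Thread.sleep(1000); // stand-in for a blocking take()/wait()
        } catch (InterruptedException e) {
          if (running) {
            // Only an interrupt that was NOT part of a stop() is an error.
            System.err.println("unexpected interrupt while tracking moves");
          }
          Thread.currentThread().interrupt(); // restore the flag
          return;
        }
      }
    }
  }

  public static void main(String[] args) throws Exception {
    Tracker tracker = new Tracker();
    Thread t = new Thread(tracker, "tracker");
    t.start();
    tracker.stopTracking(); // clear the flag first ...
    t.interrupt();          // ... then deliver the interrupt
    t.join(3000);
  }
}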
 



[34/50] [abbrv] hadoop git commit: HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. Contributed by Uma Maheswara Rao G.

2018-07-12 Thread rakeshr
HDFS-13025. [SPS]: Implement a mechanism to scan the files for external SPS. 
Contributed by Uma Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/37973317
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/37973317
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/37973317

Branch: refs/heads/HDFS-10285
Commit: 3797331729f4cf152db178b3b11f98804a5faf46
Parents: c2e7fbd
Author: Rakesh Radhakrishnan 
Authored: Tue Jan 23 20:09:26 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:03:35 2018 +0530

--
 .../sps/BlockStorageMovementNeeded.java |  70 +++-
 .../hdfs/server/namenode/sps/Context.java   |   8 +
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |   2 +
 .../namenode/sps/IntraSPSNameNodeContext.java   |   7 +
 .../sps/IntraSPSNameNodeFileIdCollector.java|   6 +-
 .../hdfs/server/namenode/sps/SPSService.java|  10 +-
 .../namenode/sps/StoragePolicySatisfier.java|   8 +-
 .../server/sps/ExternalSPSFileIDCollector.java  | 156 +
 .../hadoop/hdfs/server/sps/package-info.java|  28 ++
 .../sps/TestStoragePolicySatisfier.java | 323 ++-
 .../sps/TestExternalStoragePolicySatisfier.java | 108 +++
 11 files changed, 556 insertions(+), 170 deletions(-)
--
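
A key piece of this change, visible in the BlockStorageMovementNeeded hunks below, is per-directory scan bookkeeping: every scanned child increments a pending count for its start directory, scanCompleted records that the traversal will emit no more children, and the directory is fully satisfied only when the scan is complete and the pending count reaches zero. A standalone sketch of that accounting, with hypothetical names (PendingScanStats, DirPendingWork):

import java.util.HashMap;
import java.util.Map;

/** Hypothetical sketch of per-directory pending-scan accounting. */
public class PendingScanStats {
  static class DirPendingWork {
    int pendingCount;
    boolean scanCompleted;

    boolean isDone() {
      return scanCompleted && pendingCount == 0;
    }
  }

  private final Map<Long, DirPendingWork> pendingByDir = new HashMap<>();

  /** Called as the traverser emits scanned children of startId. */
  public synchronized void addScanned(long startId, int numScannedFiles,
      boolean scanCompleted) {
    DirPendingWork w =
        pendingByDir.computeIfAbsent(startId, k -> new DirPendingWork());
    w.pendingCount += numScannedFiles;
    if (scanCompleted) {
      w.scanCompleted = true; // traversal of this subtree is finished
    }
  }

  /** Called when movement for one scanned file has been handled. */
  public synchronized boolean markHandled(long startId) {
    DirPendingWork w = pendingByDir.get(startId);
    if (w == null) {
      return false;
    }
    w.pendingCount--;
    if (w.isDone()) {
      pendingByDir.remove(startId); // whole directory satisfied
      return true;
    }
    return false;
  }

  public static void main(String[] args) {
    PendingScanStats stats = new PendingScanStats();
    stats.addScanned(42L, 2, true);
    stats.markHandled(42L);
    System.out.println("dir done: " + stats.markHandled(42L)); // true
  }
}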


http://git-wip-us.apache.org/repos/asf/hadoop/blob/37973317/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
index 39a0051..b141502 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -97,23 +97,53 @@ public class BlockStorageMovementNeeded {
   }
 
   /**
-   * Add the itemInfo to tracking list for which storage movement
-   * expected if necessary.
+   * Add the itemInfo list to tracking list for which storage movement expected
+   * if necessary.
+   *
* @param startId
-   *- start id
+   *  - start id
* @param itemInfoList
-   *- List of child in the directory
+   *  - List of child in the directory
+   * @param scanCompleted
+   *  -Indicates whether the start id directory has no more elements to
+   *  scan.
*/
   @VisibleForTesting
-  public synchronized void addAll(long startId,
-  List itemInfoList, boolean scanCompleted) {
+  public synchronized void addAll(long startId, List itemInfoList,
+  boolean scanCompleted) {
 storageMovementNeeded.addAll(itemInfoList);
+updatePendingDirScanStats(startId, itemInfoList.size(), scanCompleted);
+  }
+
+  /**
+   * Add the itemInfo to tracking list for which storage movement expected if
+   * necessary.
+   *
+   * @param itemInfoList
+   *  - List of child in the directory
+   * @param scanCompleted
+   *  -Indicates whether the ItemInfo start id directory has no more
+   *  elements to scan.
+   */
+  @VisibleForTesting
+  public synchronized void add(ItemInfo itemInfo, boolean scanCompleted) {
+storageMovementNeeded.add(itemInfo);
+// This represents sps start id is file, so no need to update pending dir
+// stats.
+if (itemInfo.getStartId() == itemInfo.getFileId()) {
+  return;
+}
+updatePendingDirScanStats(itemInfo.getStartId(), 1, scanCompleted);
+  }
+
+  private void updatePendingDirScanStats(long startId, int numScannedFiles,
+  boolean scanCompleted) {
 DirPendingWorkInfo pendingWork = pendingWorkForDirectory.get(startId);
 if (pendingWork == null) {
   pendingWork = new DirPendingWorkInfo();
   pendingWorkForDirectory.put(startId, pendingWork);
 }
-pendingWork.addPendingWorkCount(itemInfoList.size());
+pendingWork.addPendingWorkCount(numScannedFiles);
 if (scanCompleted) {
   pendingWork.markScanCompleted();
 }
@@ -250,13 +280,15 @@ public class BlockStorageMovementNeeded {
 
 @Override
 public void run() {
-  LOG.info("Starting FileInodeIdCollector!.");
+  LOG.info("Starting SPSPathIdProcessor!.");
   long lastStatusCleanTime = 0;
+  Long startINodeId = null;
   while (ctxt.isRunning()) {
-LOG.info("Running FileInodeIdCollector!.");
 try {
   if (!ctxt.isInSafeMode()) {
-Long startINodeId = ctxt.getNextSPSPathId();
+if (startINodeId 

[33/50] [abbrv] hadoop git commit: HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for external/internal implementations. Contributed by Uma Maheswara Rao G

2018-07-12 Thread rakeshr
HDFS-12911. [SPS]: Modularize the SPS code and expose necessary interfaces for 
external/internal implementations. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c2e7fbd4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c2e7fbd4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c2e7fbd4

Branch: refs/heads/HDFS-10285
Commit: c2e7fbd4079568d46ef057fd32105a889e7dc7bc
Parents: 18a7bd8
Author: Rakesh Radhakrishnan 
Authored: Fri Jan 19 08:51:49 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:03:27 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  61 +-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  16 +-
 .../hdfs/server/namenode/FSDirectory.java   |   6 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  10 +-
 .../namenode/sps/BlockMoveTaskHandler.java  |  44 
 .../namenode/sps/BlockMovementListener.java |  40 
 .../sps/BlockStorageMovementAttemptedItems.java |  28 +--
 .../sps/BlockStorageMovementNeeded.java | 207 ---
 .../hdfs/server/namenode/sps/Context.java   |  43 ++--
 .../server/namenode/sps/FileIdCollector.java|  43 
 .../IntraSPSNameNodeBlockMoveTaskHandler.java   |  62 ++
 .../namenode/sps/IntraSPSNameNodeContext.java   |  62 ++
 .../sps/IntraSPSNameNodeFileIdCollector.java| 178 
 .../hdfs/server/namenode/sps/ItemInfo.java  |  81 
 .../hdfs/server/namenode/sps/SPSPathIds.java|  63 ++
 .../hdfs/server/namenode/sps/SPSService.java| 107 ++
 .../namenode/sps/StoragePolicySatisfier.java| 175 +++-
 .../TestBlockStorageMovementAttemptedItems.java |  19 +-
 .../sps/TestStoragePolicySatisfier.java | 111 ++
 ...stStoragePolicySatisfierWithStripedFile.java |  19 +-
 20 files changed, 938 insertions(+), 437 deletions(-)
--
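
The file list above shows the shape of the modularization: Context, FileIdCollector and SPSService become small interfaces, IntraSPSNameNode* classes implement them for the in-NameNode case, and the satisfier receives its collaborators at init time rather than reaching into FSDirectory directly. A toy sketch of that dependency-injection wiring; all types here are hypothetical, not the actual HDFS interfaces:

/** Hypothetical sketch of init-time wiring for a pluggable satisfier. */
interface SatisfierContext {
  boolean isRunning();
  boolean isInSafeMode();
}

interface FileCollector {
  void scan(long startId);
}

public class SatisfierDemo {
  private SatisfierContext ctxt;
  private FileCollector collector;

  /** Collaborators are injected after construction, mirroring sps.init(...). */
  public void init(SatisfierContext ctxt, FileCollector collector) {
    this.ctxt = ctxt;
    this.collector = collector;
  }

  public void satisfy(long startId) {
    if (ctxt.isRunning() && !ctxt.isInSafeMode()) {
      collector.scan(startId);
    }
  }

  public static void main(String[] args) {
    SatisfierDemo sps = new SatisfierDemo();
    sps.init(new SatisfierContext() {
      public boolean isRunning() { return true; }
      public boolean isInSafeMode() { return false; }
    }, startId -> System.out.println("scanning subtree of inode " + startId));
    sps.satisfy(16385L);
  }
}

The same satisfier can then run inside the NameNode or as an external process purely by swapping the injected context and collector implementations.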


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c2e7fbd4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c2d5162..63117ce 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -93,8 +93,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
-import org.apache.hadoop.hdfs.server.namenode.sps.Context;
-import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
+import org.apache.hadoop.hdfs.server.namenode.sps.SPSPathIds;
+import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -434,7 +434,8 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private Context spsctxt = null;
+  private final SPSPathIds spsPaths;
+
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -481,8 +482,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-spsctxt = new IntraSPSNameNodeContext(namesystem, this, conf);
-sps = new StoragePolicySatisfier(spsctxt);
+sps = new StoragePolicySatisfier(conf);
+spsPaths = new SPSPathIds();
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5033,8 +5034,7 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-// TODO: FSDirectory will get removed via HDFS-12911 modularization work
-sps.start(false, namesystem.getFSDirectory());
+sps.start(false);
   }
 
   /**
@@ -5070,8 +5070,7 @@ public class BlockManager implements BlockStatsMXBean {
   

[31/50] [abbrv] hadoop git commit: HDFS-12982 : [SPS]: Reduce the locking and cleanup the Namesystem access. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-12982 : [SPS]: Reduce the locking and cleanup the Namesystem access. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/18a7bd8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/18a7bd8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/18a7bd8a

Branch: refs/heads/HDFS-10285
Commit: 18a7bd8a09498fd6ce3191654a7f2f8912bf604f
Parents: 9062df8
Author: Surendra Singh Lilhore 
Authored: Mon Jan 8 15:13:11 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:03:20 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  16 +-
 .../blockmanagement/DatanodeDescriptor.java |   2 +-
 .../server/blockmanagement/DatanodeManager.java |  22 ++
 .../server/namenode/FSDirStatAndListingOp.java  |   1 +
 .../hdfs/server/namenode/FSNamesystem.java  |  44 ++-
 .../hdfs/server/namenode/IntraNNSPSContext.java |  41 --
 .../hadoop/hdfs/server/namenode/Namesystem.java |  24 ++
 .../sps/BlockStorageMovementAttemptedItems.java |  17 +-
 .../sps/BlockStorageMovementNeeded.java |  48 ++-
 .../hdfs/server/namenode/sps/Context.java   | 181 +
 .../namenode/sps/IntraSPSNameNodeContext.java   | 220 +++
 .../namenode/sps/StoragePolicySatisfier.java| 374 +--
 .../TestBlockStorageMovementAttemptedItems.java |  17 +-
 .../sps/TestStoragePolicySatisfier.java |  25 +-
 14 files changed, 742 insertions(+), 290 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/18a7bd8a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1cf687e..c2d5162 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,11 +89,12 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
-import org.apache.hadoop.hdfs.server.namenode.IntraNNSPSContext;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.sps.Context;
+import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
@@ -433,6 +434,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
+  private Context spsctxt = null;
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -479,8 +481,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-StoragePolicySatisfier.Context spsctxt = new IntraNNSPSContext(namesystem);
-sps = new StoragePolicySatisfier(namesystem, this, conf, spsctxt);
+spsctxt = new IntraSPSNameNodeContext(namesystem, this, conf);
+sps = new StoragePolicySatisfier(spsctxt);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5031,8 +5033,8 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-
-sps.start(false);
+// TODO: FSDirectory will get removed via HDFS-12911 modularization work
+sps.start(false, namesystem.getFSDirectory());
   }
 
   /**
@@ -5068,8 +5070,8 @@ public class BlockManager implements BlockStatsMXBean {
   LOG.info("Storage policy satisfier is already running.");
   return;
 }
-
-sps.start(true);
+// TODO: FSDirectory will get removed via HDFS-12911 modularization work
+sps.start(true, 

[37/50] [abbrv] hadoop git commit: HDFS-13057: [SPS]: Revisit configurations to make SPS service modes internal/external/none. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-13057: [SPS]: Revisit configurations to make SPS service modes 
internal/external/none. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/547537ec
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/547537ec
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/547537ec

Branch: refs/heads/HDFS-10285
Commit: 547537ec7423b9a4f76067482780273b353eb779
Parents: 4b52752
Author: Uma Maheswara Rao G 
Authored: Fri Jan 26 08:57:29 2018 -0800
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:03:51 2018 +0530

--
 .../hadoop/hdfs/protocol/HdfsConstants.java |  39 
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   9 +-
 .../server/blockmanagement/BlockManager.java| 105 +++---
 .../hdfs/server/namenode/FSNamesystem.java  |   6 +-
 .../hdfs/server/namenode/FSTreeTraverser.java   |   2 +-
 .../hadoop/hdfs/server/namenode/NameNode.java   |  34 ++--
 .../sps/BlockStorageMovementNeeded.java |   2 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |   3 +
 .../hdfs/server/namenode/sps/SPSService.java|   4 +-
 .../namenode/sps/StoragePolicySatisfier.java|  17 +-
 .../server/sps/ExternalSPSFileIDCollector.java  |  32 ++-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  16 +-
 .../src/main/resources/hdfs-default.xml |  11 +-
 .../src/site/markdown/ArchivalStorage.md|  17 +-
 .../TestStoragePolicySatisfyWorker.java |   5 +-
 .../hadoop/hdfs/server/mover/TestMover.java |  45 +++--
 .../hdfs/server/mover/TestStorageMover.java |   4 +-
 .../namenode/TestNameNodeReconfigure.java   | 105 +-
 .../TestPersistentStoragePolicySatisfier.java   |   9 +-
 .../TestStoragePolicySatisfierWithHA.java   |  12 +-
 .../sps/TestStoragePolicySatisfier.java | 202 +++
 ...stStoragePolicySatisfierWithStripedFile.java |  17 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 112 +++---
 .../hdfs/tools/TestStoragePolicyCommands.java   |   5 +-
 .../TestStoragePolicySatisfyAdminCommands.java  |  14 +-
 25 files changed, 500 insertions(+), 327 deletions(-)
--
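
The central addition, shown in the HdfsConstants hunk below, is a StoragePolicySatisfierMode enum (INTERNAL, EXTERNAL, NONE) that is resolved from the configured dfs.storage.policy.satisfier.mode string through an upper-cased lookup map. A compact standalone version of that lookup pattern; the class and demo values here are illustrative only:

import java.util.HashMap;
import java.util.Locale;
import java.util.Map;

/** Hypothetical sketch of the string-to-mode lookup used above. */
public class ModeLookupDemo {
  enum Mode {
    INTERNAL, EXTERNAL, NONE;

    private static final Map<String, Mode> MAP = new HashMap<>();
    static {
      for (Mode m : values()) {
        MAP.put(m.name(), m);
      }
    }

    /** Returns null for unrecognised input, so callers can reject it. */
    static Mode fromString(String s) {
      return s == null ? null : MAP.get(s.toUpperCase(Locale.ROOT));
    }
  }

  public static void main(String[] args) {
    // e.g. the value of dfs.storage.policy.satisfier.mode from hdfs-site.xml
    System.out.println(Mode.fromString("external")); // EXTERNAL
    System.out.println(Mode.fromString("bogus"));    // null
  }
}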


http://git-wip-us.apache.org/repos/asf/hadoop/blob/547537ec/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
index aabcdd9..ab48dcd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/HdfsConstants.java
@@ -129,6 +129,45 @@ public final class HdfsConstants {
   }
 
   /**
+   * Storage policy satisfier service modes.
+   */
+  public enum StoragePolicySatisfierMode {
+
+/**
+ * This mode represents that SPS service is running inside Namenode and can
+ * accept any SPS call request.
+ */
+INTERNAL,
+
+/**
+ * This mode represents that SPS service is running outside Namenode as an
+ * external service and can accept any SPS call request.
+ */
+EXTERNAL,
+
+/**
+ * This mode represents that SPS service is disabled and cannot accept any
+ * SPS call request.
+ */
+NONE;
+
+private static final Map MAP =
+new HashMap<>();
+
+static {
+  for (StoragePolicySatisfierMode a : values()) {
+MAP.put(a.name(), a);
+  }
+}
+
+/** Convert the given String to a StoragePolicySatisfierMode. */
+public static StoragePolicySatisfierMode fromString(String s) {
+  return MAP.get(StringUtils.toUpperCase(s));
+}
+  }
+
+
+  /**
* Storage policy satisfy path status.
*/
   public enum StoragePolicySatisfyPathStatus {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/547537ec/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index fc95d8c..0d93ff3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.net.DFSNetworkTopology;
 import 

[04/50] [abbrv] hadoop git commit: HDFS-11883: [SPS] : Handle NPE in BlockStorageMovementTracker when dropSPSWork() called. Contributed by Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-11883: [SPS] : Handle NPE in BlockStorageMovementTracker when 
dropSPSWork() called. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dacec33f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dacec33f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dacec33f

Branch: refs/heads/HDFS-10285
Commit: dacec33faf46552295250c2873f8b5099051815a
Parents: a61e1f3
Author: Uma Maheswara Rao G 
Authored: Tue May 30 18:12:17 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:00:41 2018 +0530

--
 .../hdfs/server/datanode/BlockStorageMovementTracker.java  | 6 +-
 1 file changed, 5 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dacec33f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
index 99858bc..c7e952b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockStorageMovementTracker.java
@@ -88,13 +88,17 @@ public class BlockStorageMovementTracker implements Runnable {
   long trackId = result.getTrackId();
   List> blocksMoving = moverTaskFutures
   .get(trackId);
+  if (blocksMoving == null) {
+LOG.warn("Future task doesn't exist for trackId " + trackId);
+continue;
+  }
   blocksMoving.remove(future);
 
   List resultPerTrackIdList =
   addMovementResultToTrackIdList(result);
 
   // Completed all the scheduled blocks movement under this 'trackId'.
-  if (blocksMoving.isEmpty()) {
+  if (blocksMoving.isEmpty() || moverTaskFutures.get(trackId) == null) {
 synchronized (moverTaskFutures) {
   moverTaskFutures.remove(trackId);
 }
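
The guard above closes a race: dropSPSWork() can clear moverTaskFutures while the tracker thread is still consuming completed futures, so the per-track list must be checked for null before use and the track skipped once the entry is gone. The same defensive shape in isolation, with hypothetical names:

import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Hypothetical sketch of the null-guard for concurrently dropped work. */
public class DropWorkGuardDemo {
  private final Map<Long, List<String>> tasksPerTrack = new HashMap<>();

  void onResult(long trackId, String result) {
    synchronized (tasksPerTrack) {
      List<String> pending = tasksPerTrack.get(trackId);
      if (pending == null) {
        // Work for this trackId was dropped meanwhile; ignore the result.
        System.out.println("no pending tasks for trackId " + trackId);
        return;
      }
      pending.remove(result);
      if (pending.isEmpty()) {
        tasksPerTrack.remove(trackId);
      }
    }
  }

  public static void main(String[] args) {
    new DropWorkGuardDemo().onResult(1L, "blk_1"); // map is empty: guarded
  }
}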



[40/50] [abbrv] hadoop git commit: HDFS-13077. [SPS]: Fix review comments of external storage policy satisfier. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-13077. [SPS]: Fix review comments of external storage policy satisfier. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fd070b28
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fd070b28
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fd070b28

Branch: refs/heads/HDFS-10285
Commit: fd070b2871117dbf99b62587d9df3616ffde6dcc
Parents: 5213c19
Author: Surendra Singh Lilhore 
Authored: Mon Jan 29 23:59:55 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:04:15 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |  14 +-
 .../server/blockmanagement/BlockManager.java|  33 +++-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  15 ++
 .../hdfs/server/namenode/FSNamesystem.java  |  41 ++--
 .../hdfs/server/namenode/NameNodeRpcServer.java |  11 ++
 .../hdfs/server/namenode/sps/SPSPathIds.java|   8 +-
 .../namenode/sps/StoragePolicySatisfier.java|   6 +-
 .../hdfs/server/sps/ExternalSPSContext.java |   4 +
 .../sps/ExternalStoragePolicySatisfier.java |  30 ++-
 .../sps/TestStoragePolicySatisfier.java |   7 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 195 ++-
 11 files changed, 323 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd070b28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index 0d93ff3..c79bf60 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -608,7 +608,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
  public static final String  DFS_MOVER_MAX_NO_MOVE_INTERVAL_KEY =
      "dfs.mover.max-no-move-interval";
  public static final int    DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
 
-  // SPS related configurations
+  // StoragePolicySatisfier (SPS) related configurations
   public static final String  DFS_STORAGE_POLICY_SATISFIER_MODE_KEY =
   "dfs.storage.policy.satisfier.mode";
   public static final String DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT =
@@ -637,6 +637,18 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.storage.policy.satisfier.low.max-streams.preference";
   public static final boolean 
DFS_STORAGE_POLICY_SATISFIER_LOW_MAX_STREAMS_PREFERENCE_DEFAULT =
   true;
+  public static final String DFS_SPS_MAX_OUTSTANDING_PATHS_KEY =
+  "dfs.storage.policy.satisfier.max.outstanding.paths";
+  public static final int DFS_SPS_MAX_OUTSTANDING_PATHS_DEFAULT = 1;
+
+  // SPS keytab configurations, by default it is disabled.
+  public static final String  DFS_SPS_ADDRESS_KEY =
+  "dfs.storage.policy.satisfier.address";
+  public static final String  DFS_SPS_ADDRESS_DEFAULT= "0.0.0.0:0";
+  public static final String  DFS_SPS_KEYTAB_FILE_KEY =
+  "dfs.storage.policy.satisfier.keytab.file";
+  public static final String  DFS_SPS_KERBEROS_PRINCIPAL_KEY =
+  "dfs.storage.policy.satisfier.kerberos.principal";
 
   public static final String  DFS_DATANODE_ADDRESS_KEY = 
"dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/fd070b28/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index f348a33..00a91a9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -439,6 +439,7 @@ public class BlockManager implements BlockStatsMXBean {
   private final boolean storagePolicyEnabled;
   private StoragePolicySatisfierMode spsMode;
   private SPSPathIds spsPaths;
+  private final int spsOutstandingPathsLimit;
 
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
@@ -478,14 +479,16 @@ public class BlockManager implements BlockStatsMXBean {
 

[48/50] [abbrv] hadoop git commit: HDFS-13381 : [SPS]: Use DFSUtilClient#makePathFromFileId() to prepare satisfier file path. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/24bcc8c3/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
index b05717a..ec5307b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -108,8 +108,6 @@ public class TestStoragePolicySatisfier {
   public static final long CAPACITY = 2 * 256 * 1024 * 1024;
   public static final String FILE = "/testMoveToSatisfyStoragePolicy";
   public static final int DEFAULT_BLOCK_SIZE = 1024;
-  private ExternalBlockMovementListener blkMoveListener =
-  new ExternalBlockMovementListener();
 
   /**
* Sets hdfs cluster.
@@ -1282,8 +1280,8 @@ public class TestStoragePolicySatisfier {
 
 //Queue limit can control the traverse logic to wait for some free
 //entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier sps = new 
StoragePolicySatisfier(config);
-Context ctxt = new IntraSPSNameNodeContext(
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(
 hdfsCluster.getNamesystem(),
 hdfsCluster.getNamesystem().getBlockManager(), sps) {
   @Override
@@ -1297,8 +1295,7 @@ public class TestStoragePolicySatisfier {
   }
 };
 
-FileCollector fileIDCollector = createFileIdCollector(sps, ctxt);
-sps.init(ctxt, fileIDCollector, null, null);
+sps.init(ctxt);
 sps.getStorageMovementQueue().activate();
 
 INode rootINode = fsDir.getINode("/root");
@@ -1314,13 +1311,6 @@ public class TestStoragePolicySatisfier {
 dfs.delete(new Path("/root"), true);
   }
 
-  public FileCollector createFileIdCollector(
-  StoragePolicySatisfier sps, Context ctxt) {
-FileCollector fileIDCollector = new IntraSPSNameNodeFileIdCollector(
-hdfsCluster.getNamesystem().getFSDirectory(), sps);
-return fileIDCollector;
-  }
-
   /**
*  Test traverse when root parent got deleted.
*  1. Delete L when traversing Q
@@ -1351,8 +1341,8 @@ public class TestStoragePolicySatisfier {
 
 // Queue limit can control the traverse logic to wait for some free
 // entry in queue. After 10 files, traverse control will be on U.
-StoragePolicySatisfier sps = new 
StoragePolicySatisfier(config);
-Context ctxt = new IntraSPSNameNodeContext(
+StoragePolicySatisfier sps = new StoragePolicySatisfier(config);
+Context ctxt = new IntraSPSNameNodeContext(
 hdfsCluster.getNamesystem(),
 hdfsCluster.getNamesystem().getBlockManager(), sps) {
   @Override
@@ -1365,8 +1355,7 @@ public class TestStoragePolicySatisfier {
 return true;
   }
 };
-FileCollector fileIDCollector = createFileIdCollector(sps, ctxt);
-sps.init(ctxt, fileIDCollector, null, null);
+sps.init(ctxt);
 sps.getStorageMovementQueue().activate();
 
 INode rootINode = fsDir.getINode("/root");
@@ -1383,12 +1372,12 @@ public class TestStoragePolicySatisfier {
   }
 
   private void assertTraversal(List expectedTraverseOrder,
-  FSDirectory fsDir, StoragePolicySatisfier sps)
+  FSDirectory fsDir, StoragePolicySatisfier sps)
   throws InterruptedException {
 // Remove 10 element and make queue free, So other traversing will start.
 for (int i = 0; i < 10; i++) {
   String path = expectedTraverseOrder.remove(0);
-  ItemInfo itemInfo = sps.getStorageMovementQueue().get();
+  ItemInfo itemInfo = sps.getStorageMovementQueue().get();
   if (itemInfo == null) {
 continue;
   }
@@ -1403,7 +1392,7 @@ public class TestStoragePolicySatisfier {
 // Check other element traversed in order and E, M, U, R, S should not be
 // added in queue which we already removed from expected list
 for (String path : expectedTraverseOrder) {
-  ItemInfo itemInfo = sps.getStorageMovementQueue().get();
+  ItemInfo itemInfo = sps.getStorageMovementQueue().get();
   if (itemInfo == null) {
 continue;
   }
@@ -1717,17 +1706,17 @@ public class TestStoragePolicySatisfier {
   public void waitForAttemptedItems(long expectedBlkMovAttemptedCount,
   int timeout) throws TimeoutException, InterruptedException {
 BlockManager blockManager = hdfsCluster.getNamesystem().getBlockManager();
-final StoragePolicySatisfier sps =
-(StoragePolicySatisfier) blockManager.getSPSManager()
+final StoragePolicySatisfier sps =
+

[12/50] [abbrv] hadoop git commit: HDFS-12146. [SPS]: Fix TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks. Contributed by Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-12146. [SPS]: Fix 
TestStoragePolicySatisfierWithStripedFile#testSPSWhenFileHasLowRedundancyBlocks.
 Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ac8f8e8a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ac8f8e8a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ac8f8e8a

Branch: refs/heads/HDFS-10285
Commit: ac8f8e8a8313fb6b2a9c2c7820c68ad1aa2f5577
Parents: ac0e71c
Author: Rakesh Radhakrishnan 
Authored: Mon Jul 17 22:40:03 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:01:44 2018 +0530

--
 .../server/namenode/TestStoragePolicySatisfier.java |  9 +
 .../TestStoragePolicySatisfierWithStripedFile.java  | 16 
 2 files changed, 13 insertions(+), 12 deletions(-)
--
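
The fix below replaces fixed Thread.sleep() waits with DFSTestUtil.waitExpectedStorageType(...), i.e. it polls until the expected replica placement appears or a timeout expires, which is both faster on success and more robust on slow machines. A generic version of that poll-until-true helper; the WaitFor class is a hypothetical stand-in (Hadoop tests typically use GenericTestUtils.waitFor for this):

import java.util.concurrent.TimeoutException;
import java.util.function.BooleanSupplier;

/** Hypothetical sketch of the poll-until-true pattern the test adopts. */
public final class WaitFor {
  public static void waitFor(BooleanSupplier check, long intervalMs,
      long timeoutMs) throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      if (check.getAsBoolean()) {
        return; // condition met; no fixed sleep is wasted
      }
      Thread.sleep(intervalMs);
    }
    throw new TimeoutException("condition not met within " + timeoutMs + "ms");
  }

  public static void main(String[] args) throws Exception {
    long start = System.currentTimeMillis();
    // Stand-in for "N replicas reached ARCHIVE storage".
    waitFor(() -> System.currentTimeMillis() - start > 200, 100, 35000);
    System.out.println("condition satisfied");
  }
}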


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac8f8e8a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index be7236b..10ceae7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -1025,12 +1025,13 @@ public class TestStoragePolicySatisfier {
   list.add(cluster.stopDataNode(0));
   list.add(cluster.stopDataNode(0));
   cluster.restartNameNodes();
-  cluster.restartDataNode(list.get(0), true);
-  cluster.restartDataNode(list.get(1), true);
+  cluster.restartDataNode(list.get(0), false);
+  cluster.restartDataNode(list.get(1), false);
   cluster.waitActive();
   fs.satisfyStoragePolicy(filePath);
-  Thread.sleep(3000 * 6);
-  cluster.restartDataNode(list.get(2), true);
+  DFSTestUtil.waitExpectedStorageType(filePath.toString(),
+  StorageType.ARCHIVE, 2, 3, cluster.getFileSystem());
+  cluster.restartDataNode(list.get(2), false);
   DFSTestUtil.waitExpectedStorageType(filePath.toString(),
   StorageType.ARCHIVE, 3, 3, cluster.getFileSystem());
 } finally {

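The fix above replaces the fixed Thread.sleep(3000 * 6) with
DFSTestUtil.waitExpectedStorageType, polling for the expected replica
placement instead of sleeping for a hard-coded interval. A minimal,
self-contained sketch of that polling pattern follows; WaitUtil and
waitForCondition are illustrative names, not part of the patch (Hadoop
tests typically reach for a similar helper such as GenericTestUtils.waitFor):

    import java.util.concurrent.TimeoutException;
    import java.util.function.BooleanSupplier;

    public final class WaitUtil {
      private WaitUtil() {
      }

      // Poll the condition every intervalMillis until it holds or the
      // timeout expires; much less flaky than one long fixed sleep.
      public static void waitForCondition(BooleanSupplier check,
          long intervalMillis, long timeoutMillis)
          throws TimeoutException, InterruptedException {
        long deadline = System.currentTimeMillis() + timeoutMillis;
        while (!check.getAsBoolean()) {
          if (System.currentTimeMillis() > deadline) {
            throw new TimeoutException(
                "Condition not met within " + timeoutMillis + " ms");
          }
          Thread.sleep(intervalMillis);
        }
      }
    }
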
http://git-wip-us.apache.org/repos/asf/hadoop/blob/ac8f8e8a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
index f905ead..c070113 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
@@ -308,8 +308,8 @@ public class TestStoragePolicySatisfierWithStripedFile {
*/
   @Test(timeout = 30)
   public void testSPSWhenFileHasLowRedundancyBlocks() throws Exception {
-// start 10 datanodes
-int numOfDatanodes = 10;
+// start 9 datanodes
+int numOfDatanodes = 9;
 int storagesPerDatanode = 2;
 long capacity = 20 * defaultStripeBlockSize;
 long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
@@ -338,7 +338,6 @@ public class TestStoragePolicySatisfierWithStripedFile {
 {StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE},
-{StorageType.DISK, StorageType.ARCHIVE},
 {StorageType.DISK, StorageType.ARCHIVE}})
 .storageCapacities(capacities)
 .build();
@@ -366,15 +365,16 @@ public class TestStoragePolicySatisfierWithStripedFile {
   }
   cluster.restartNameNodes();
   // Restart half datanodes
-  for (int i = 0; i < numOfDatanodes / 2; i++) {
-cluster.restartDataNode(list.get(i), true);
+  for (int i = 0; i < 5; i++) {
+cluster.restartDataNode(list.get(i), false);
   }
   cluster.waitActive();
   fs.satisfyStoragePolicy(fooFile);
-  Thread.sleep(3000 * 6);
+  DFSTestUtil.waitExpectedStorageType(fooFile.toString(),
+  StorageType.ARCHIVE, 5, 3, cluster.getFileSystem());
  // Start remaining datanodes
-  

[06/50] [abbrv] hadoop git commit: HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type as source and destination in same datanode. Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-11726. [SPS]: StoragePolicySatisfier should not select same storage type 
as source and destination in same datanode. Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b483da8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b483da8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b483da8

Branch: refs/heads/HDFS-10285
Commit: 0b483da8d5fb943adbcd14f91c826a0d191cb872
Parents: ab0f412
Author: Rakesh Radhakrishnan 
Authored: Fri Jun 9 14:03:13 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:00:56 2018 +0530

--
 .../server/namenode/StoragePolicySatisfier.java | 23 ++
 .../namenode/TestStoragePolicySatisfier.java| 44 
 2 files changed, 58 insertions(+), 9 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b483da8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 9e2a4a0..1b2afa3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -501,15 +501,20 @@ public class StoragePolicySatisfier implements Runnable {
 // avoid choosing a target which already has this block.
 for (int i = 0; i < sourceWithStorageList.size(); i++) {
   StorageTypeNodePair existingTypeNodePair = sourceWithStorageList.get(i);
-  StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(blockInfo,
-  existingTypeNodePair.dn, expected);
-  if (chosenTarget != null) {
-sourceNodes.add(existingTypeNodePair.dn);
-sourceStorageTypes.add(existingTypeNodePair.storageType);
-targetNodes.add(chosenTarget.dn);
-targetStorageTypes.add(chosenTarget.storageType);
-expected.remove(chosenTarget.storageType);
-// TODO: We can increment scheduled block count for this node?
+
+  // Check whether the block replica is already placed in the expected
+  // storage type in this source datanode.
+  if (!expected.contains(existingTypeNodePair.storageType)) {
+StorageTypeNodePair chosenTarget = chooseTargetTypeInSameNode(
+blockInfo, existingTypeNodePair.dn, expected);
+if (chosenTarget != null) {
+  sourceNodes.add(existingTypeNodePair.dn);
+  sourceStorageTypes.add(existingTypeNodePair.storageType);
+  targetNodes.add(chosenTarget.dn);
+  targetStorageTypes.add(chosenTarget.storageType);
+  expected.remove(chosenTarget.storageType);
+  // TODO: We can increment scheduled block count for this node?
+}
   }
   // To avoid choosing this excludeNodes as targets later
   excludeNodes.add(existingTypeNodePair.dn);

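The guard added above is the heart of the fix: before choosing a target on
the same datanode, SPS now checks whether the replica's current storage
type is already one of the expected types and, if so, skips that source
(while still adding it to excludeNodes so it is not picked as a target
later). A small, self-contained sketch of the skip rule, using illustrative
names that are not from the patch:

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    class SkipRuleSketch {
      enum StorageType { DISK, ARCHIVE }

      // (node, storageType) pair, mirroring StorageTypeNodePair in spirit.
      static class SourcePair {
        final String node;
        final StorageType type;
        SourcePair(String node, StorageType type) {
          this.node = node;
          this.type = type;
        }
      }

      // Keep only sources whose replica is NOT already on an expected type.
      static List<String> pickSources(List<SourcePair> sources,
          List<StorageType> expected) {
        List<String> chosen = new ArrayList<>();
        for (SourcePair p : sources) {
          if (expected.contains(p.type)) {
            continue; // replica already satisfies the policy on this node
          }
          chosen.add(p.node);
        }
        return chosen;
      }

      public static void main(String[] args) {
        List<SourcePair> sources = Arrays.asList(
            new SourcePair("dn1", StorageType.DISK),
            new SourcePair("dn2", StorageType.ARCHIVE));
        // A COLD-style policy expecting ARCHIVE: only dn1 needs a move.
        System.out.println(
            pickSources(sources, Arrays.asList(StorageType.ARCHIVE)));
      }
    }
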
http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b483da8/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index 8e08a1e..f1a4169 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -764,6 +764,50 @@ public class TestStoragePolicySatisfier {
   }
 
   /**
+   * If a replica with the expected storage type already exists in the source
+   * DN, then that DN should be skipped.
+   */
+  @Test(timeout = 30)
+  public void testSPSWhenReplicaWithExpectedStorageAlreadyAvailableInSource()
+  throws Exception {
+StorageType[][] diskTypes = new StorageType[][] {
+{StorageType.DISK, StorageType.ARCHIVE},
+{StorageType.DISK, StorageType.ARCHIVE},
+{StorageType.DISK, StorageType.ARCHIVE}};
+
+try {
+  hdfsCluster = startCluster(config, diskTypes, diskTypes.length,
+  storagesPerDatanode, capacity);
+  dfs = hdfsCluster.getFileSystem();
+  // 1. Write two replica on disk
+  DFSTestUtil.createFile(dfs, new Path(file), DEFAULT_BLOCK_SIZE,
+  (short) 2, 

[08/50] [abbrv] hadoop git commit: HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. Contributed by Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-11670: [SPS]: Add CLI command for satisfy storage policy operations. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e64323e2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e64323e2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e64323e2

Branch: refs/heads/HDFS-10285
Commit: e64323e2c5fd16d1e72d6552c6d07558b817f00b
Parents: 4274dcc
Author: Uma Maheswara Rao G 
Authored: Mon Jun 19 17:16:49 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:01:12 2018 +0530

--
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   | 93 +++-
 .../src/site/markdown/ArchivalStorage.md| 21 +
 .../src/site/markdown/HDFSCommands.md   |  2 +
 .../hdfs/tools/TestStoragePolicyCommands.java   | 43 -
 4 files changed, 157 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e64323e2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
index aeb10d9..662957c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/StoragePolicyAdmin.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
@@ -32,6 +33,8 @@ import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
 
 import java.io.FileNotFoundException;
+import com.google.common.base.Joiner;
+
 import java.io.IOException;
 import java.util.Arrays;
 import java.util.Collection;
@@ -245,6 +248,92 @@ public class StoragePolicyAdmin extends Configured 
implements Tool {
 }
   }
 
+  /** Command to schedule blocks to move based on specified policy. */
+  private static class SatisfyStoragePolicyCommand implements
+  AdminHelper.Command {
+@Override
+public String getName() {
+  return "-satisfyStoragePolicy";
+}
+
+@Override
+public String getShortUsage() {
+  return "[" + getName() + " -path <path>]\n";
+}
+
+@Override
+public String getLongUsage() {
+  TableListing listing = AdminHelper.getOptionDescriptionListing();
+  listing.addRow("<path>", "The path of the file/directory to satisfy"
+  + " storage policy");
+  return getShortUsage() + "\n" +
+  "Schedule blocks to move based on file/directory policy.\n\n" +
+  listing.toString();
+}
+
+@Override
+public int run(Configuration conf, List<String> args) throws IOException {
+  final String path = StringUtils.popOptionWithArgument("-path", args);
+  if (path == null) {
+System.err.println("Please specify the path for setting the storage " +
+"policy.\nUsage: " + getLongUsage());
+return 1;
+  }
+
+  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+  try {
+dfs.satisfyStoragePolicy(new Path(path));
+System.out.println("Scheduled blocks to move based on the current"
++ " storage policy on " + path);
+  } catch (Exception e) {
+System.err.println(AdminHelper.prettifyException(e));
+return 2;
+  }
+  return 0;
+}
+  }
+
+  /** Command to check storage policy satisfier status. */
+  private static class IsSPSRunningCommand implements AdminHelper.Command {
+@Override
+public String getName() {
+  return "-isSPSRunning";
+}
+
+@Override
+public String getShortUsage() {
+  return "[" + getName() + "]\n";
+}
+
+@Override
+public String getLongUsage() {
+  return getShortUsage() + "\n" +
+  "Check the status of Storage Policy Satisfier.\n\n";
+}
+
+@Override
+public int run(Configuration conf, List<String> args) throws IOException {
+  if (!args.isEmpty()) {
+System.err.print("Can't understand arguments: "
++ Joiner.on(" ").join(args) + "\n");
+System.err.println("Usage is " + getLongUsage());
+return 1;
+  }
+  final DistributedFileSystem dfs = AdminHelper.getDFS(conf);
+  try {
+if(dfs.getClient().isStoragePolicySatisfierRunning()){
+  

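The new SatisfyStoragePolicyCommand ultimately makes a single client call,
dfs.satisfyStoragePolicy(path), exposed on the command line as
"hdfs storagepolicies -satisfyStoragePolicy -path <path>". A hypothetical
stand-alone sketch of the same call made programmatically; the path is an
example and the cast assumes fs.defaultFS points at an HDFS cluster:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class SatisfyExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        try (FileSystem fs = FileSystem.get(conf)) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // Ask the NameNode to schedule block moves so the replicas match
          // the file's current storage policy; the moves are asynchronous.
          dfs.satisfyStoragePolicy(new Path("/cold/data"));
        }
      }
    }
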
[02/50] [abbrv] hadoop git commit: HDFS-11572. [SPS]: SPS should clean Xattrs when no blocks required to satisfy for a file. Contributed by Uma Maheswara Rao G

2018-07-12 Thread rakeshr
HDFS-11572. [SPS]: SPS should clean Xattrs when no blocks required to satisfy 
for a file. Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/95f64fb6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/95f64fb6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/95f64fb6

Branch: refs/heads/HDFS-10285
Commit: 95f64fb644f15adb29906f16e93c55a23a58ed57
Parents: d37ae23
Author: Rakesh Radhakrishnan 
Authored: Thu Apr 20 23:14:36 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:00:25 2018 +0530

--
 .../BlockStorageMovementAttemptedItems.java |   2 +-
 .../server/namenode/StoragePolicySatisfier.java | 116 ++-
 .../org/apache/hadoop/hdfs/DFSTestUtil.java |  35 ++
 .../TestPersistentStoragePolicySatisfier.java   |  52 +
 .../namenode/TestStoragePolicySatisfier.java|  76 
 5 files changed, 225 insertions(+), 56 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/95f64fb6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index f2406da..bf7859c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -333,7 +333,7 @@ public class BlockStorageMovementAttemptedItems {
   + "doesn't exist in storageMovementAttemptedItems list",
   storageMovementAttemptedResult.getTrackId());
   // Remove xattr for the track id.
-  this.sps.notifyBlkStorageMovementFinished(
+  this.sps.postBlkStorageMovementCleanup(
   storageMovementAttemptedResult.getTrackId());
 }
 break;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/95f64fb6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 8be0a2a..3b20314 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -79,6 +79,27 @@ public class StoragePolicySatisfier implements Runnable {
   private final BlockStorageMovementAttemptedItems storageMovementsMonitor;
   private volatile boolean isRunning = false;
 
+  /**
+   * Represents the collective analysis status for all blocks.
+   */
+  private enum BlocksMovingAnalysisStatus {
+// Represents that the analysis was skipped and should be retried. One
+// such condition is a block collection in an incomplete state.
+ANALYSIS_SKIPPED_FOR_RETRY,
+// Represents that all blocks needing storage movement found their
+// targets.
+ALL_BLOCKS_TARGETS_PAIRED,
+// Represents that only some (or none) of the blocks needing storage
+// movement found eligible targets.
+FEW_BLOCKS_TARGETS_PAIRED,
+// Represents that none of the blocks need storage movement.
+BLOCKS_ALREADY_SATISFIED,
+// Represents that the analysis was skipped due to some condition, for
+// example no blocks exist in the block collection, or analysis is not
+// required for EC files with unsuitable storage policies.
+BLOCKS_TARGET_PAIRING_SKIPPED;
+  }
+
   public StoragePolicySatisfier(final Namesystem namesystem,
   final BlockStorageMovementNeeded storageMovementNeeded,
   final BlockManager blkManager, Configuration conf) {
@@ -208,10 +229,31 @@ public class StoragePolicySatisfier implements Runnable {
 namesystem.getBlockCollection(blockCollectionID);
 // Check blockCollectionId existence.
 if (blockCollection != null) {
-  boolean allBlockLocsAttemptedToSatisfy =
-  
computeAndAssignStorageMismatchedBlocksToDNs(blockCollection);
-  this.storageMovementsMonitor
-

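The BlocksMovingAnalysisStatus enum above is what enables the eager xattr
cleanup this patch is about: when analysis reports that nothing needs to
move, the tracking xattr can be removed immediately instead of lingering on
the file. A hypothetical sketch of branching on such a status; the handler
methods are illustrative names, not from the patch:

    class SpsStatusHandlerSketch {
      enum Status {
        ANALYSIS_SKIPPED_FOR_RETRY,
        ALL_BLOCKS_TARGETS_PAIRED,
        FEW_BLOCKS_TARGETS_PAIRED,
        BLOCKS_ALREADY_SATISFIED,
        BLOCKS_TARGET_PAIRING_SKIPPED
      }

      void handle(Status status, long trackId) {
        switch (status) {
          case ANALYSIS_SKIPPED_FOR_RETRY:
            retryLater(trackId);          // e.g. block collection incomplete
            break;
          case ALL_BLOCKS_TARGETS_PAIRED:
          case FEW_BLOCKS_TARGETS_PAIRED:
            monitorAttempt(trackId);      // wait for block movement results
            break;
          default:
            removeTrackingXattr(trackId); // nothing to move: clean up now
            break;
        }
      }

      void retryLater(long trackId) {
      }

      void monitorAttempt(long trackId) {
      }

      void removeTrackingXattr(long trackId) {
      }
    }
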
[15/50] [abbrv] hadoop git commit: HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-12214: [SPS]: Fix review comments of StoragePolicySatisfier feature. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/544eb25f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/544eb25f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/544eb25f

Branch: refs/heads/HDFS-10285
Commit: 544eb25fd499cf9b90cdb9c564e98dfc349a7896
Parents: f2e76f7
Author: Uma Maheswara Rao G 
Authored: Thu Aug 17 13:21:07 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:02:08 2018 +0530

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../server/blockmanagement/BlockManager.java| 104 +++
 .../BlockStorageMovementAttemptedItems.java |   4 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  20 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  22 ++--
 .../server/namenode/StoragePolicySatisfier.java |  20 ++--
 .../protocol/BlocksStorageMovementResult.java   |   2 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |  11 +-
 .../src/main/resources/hdfs-default.xml |  10 +-
 .../src/site/markdown/ArchivalStorage.md|  14 +--
 .../src/site/markdown/HDFSCommands.md   |   2 +-
 .../TestStoragePolicySatisfyWorker.java |   2 +-
 .../hadoop/hdfs/server/mover/TestMover.java |  22 ++--
 .../hdfs/server/mover/TestStorageMover.java |   2 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../namenode/TestNameNodeReconfigure.java   |  99 --
 .../TestPersistentStoragePolicySatisfier.java   |   6 +-
 .../namenode/TestStoragePolicySatisfier.java|  35 +--
 .../TestStoragePolicySatisfierWithHA.java   |  10 +-
 ...stStoragePolicySatisfierWithStripedFile.java |   8 ++
 .../hdfs/tools/TestStoragePolicyCommands.java   |  21 ++--
 22 files changed, 265 insertions(+), 161 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/544eb25f/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 38be348b..bc6e7a4 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -62,7 +62,7 @@ function hadoop_usage
   hadoop_add_subcommand "portmap" daemon "run a portmap service"
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
-  hadoop_add_subcommand "storagepolicies" admin "list/get/set block storage policies"
+  hadoop_add_subcommand "storagepolicies" admin "list/get/set/satisfyStoragePolicy block storage policies"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false

http://git-wip-us.apache.org/repos/asf/hadoop/blob/544eb25f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index b5341a2..cf5206a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -608,10 +608,10 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final int    DFS_MOVER_MAX_NO_MOVE_INTERVAL_DEFAULT = 60*1000; // One minute
 
   // SPS related configurations
-  public static final String  DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_KEY =
-  "dfs.storage.policy.satisfier.activate";
-  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ACTIVATE_DEFAULT =
-  true;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY =
+  "dfs.storage.policy.satisfier.enabled";
+  public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
+  false;
   public static final String DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

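Besides the rename, note that the default flips from true to false, so
after this patch the satisfier stays off unless a deployment opts in. A
small, hypothetical sketch of reading the renamed key; the literal strings
match the patch, while the surrounding class is illustrative:

    import org.apache.hadoop.conf.Configuration;

    public class SpsConfigCheck {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Renamed from dfs.storage.policy.satisfier.activate; the default
        // is now false, so SPS must be enabled explicitly.
        boolean spsEnabled = conf.getBoolean(
            "dfs.storage.policy.satisfier.enabled", false);
        System.out.println("SPS enabled: " + spsEnabled);
      }
    }
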
http://git-wip-us.apache.org/repos/asf/hadoop/blob/544eb25f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

[44/50] [abbrv] hadoop git commit: HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by external satisfier. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by 
external satisfier. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/538b9bd9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/538b9bd9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/538b9bd9

Branch: refs/heads/HDFS-10285
Commit: 538b9bd93b27700320e0b212026d3846c93f5da0
Parents: f5b34b4
Author: Rakesh Radhakrishnan 
Authored: Fri Feb 16 17:01:38 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:04:31 2018 +0530

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  46 +
 .../NamenodeProtocolTranslatorPB.java   |  42 +
 .../hdfs/server/namenode/FSTreeTraverser.java   |   2 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  32 +---
 .../server/namenode/ReencryptionHandler.java|   2 +-
 .../sps/BlockStorageMovementAttemptedItems.java |  42 +++--
 .../sps/BlockStorageMovementNeeded.java | 119 +++--
 .../hdfs/server/namenode/sps/Context.java   |  55 +++---
 .../hdfs/server/namenode/sps/FileCollector.java |  48 +
 .../server/namenode/sps/FileIdCollector.java|  43 -
 .../namenode/sps/IntraSPSNameNodeContext.java   |  39 ++---
 .../sps/IntraSPSNameNodeFileIdCollector.java|  23 +--
 .../hdfs/server/namenode/sps/ItemInfo.java  |  39 +++--
 .../hdfs/server/namenode/sps/SPSService.java|  32 ++--
 .../namenode/sps/StoragePolicySatisfier.java| 129 +-
 .../sps/StoragePolicySatisfyManager.java|   6 +-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  24 +--
 .../sps/ExternalSPSBlockMoveTaskHandler.java|   4 +-
 .../hdfs/server/sps/ExternalSPSContext.java |  60 +++
 .../server/sps/ExternalSPSFileIDCollector.java  | 174 ---
 .../sps/ExternalSPSFilePathCollector.java   | 172 ++
 .../sps/ExternalStoragePolicySatisfier.java |   7 +-
 .../src/main/proto/NamenodeProtocol.proto   |  27 +--
 .../TestBlockStorageMovementAttemptedItems.java |  27 ++-
 .../sps/TestStoragePolicySatisfier.java |  52 +++---
 ...stStoragePolicySatisfierWithStripedFile.java |  15 +-
 .../sps/TestExternalStoragePolicySatisfier.java | 148 +++-
 27 files changed, 701 insertions(+), 708 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/538b9bd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index 25eafdf..ed176cc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -35,16 +35,12 @@ import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksReq
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksRequestProto;
-import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksResponseProto;
 import 

[42/50] [abbrv] hadoop git commit: HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by Surendra Singh.

2018-07-12 Thread rakeshr
HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by 
Surendra Singh.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f5b34b43
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f5b34b43
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f5b34b43

Branch: refs/heads/HDFS-10285
Commit: f5b34b43f17bf1dd0bb8d4726024338ed1c38607
Parents: fd070b2
Author: Uma Maheswara Rao G 
Authored: Wed Feb 7 02:28:23 2018 -0800
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:04:23 2018 +0530

--
 .../java/org/apache/hadoop/hdfs/DFSClient.java  |   4 +-
 .../hadoop/hdfs/protocol/ClientProtocol.java|   6 +-
 .../ClientNamenodeProtocolTranslatorPB.java |  14 +-
 .../src/main/proto/ClientNamenodeProtocol.proto |   8 +-
 .../federation/router/RouterRpcServer.java  |   2 +-
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  61 ---
 ...tNamenodeProtocolServerSideTranslatorPB.java |  16 +-
 .../server/blockmanagement/BlockManager.java| 255 +---
 .../blockmanagement/DatanodeDescriptor.java |  33 +-
 .../hdfs/server/common/HdfsServerConstants.java |   2 +-
 .../datanode/StoragePolicySatisfyWorker.java|  15 +-
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  26 +-
 .../server/namenode/FSDirStatAndListingOp.java  |   1 -
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  46 +--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  30 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  21 +-
 .../sps/BlockStorageMovementNeeded.java |   4 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |   6 +-
 .../hdfs/server/namenode/sps/SPSPathIds.java|  70 
 .../hdfs/server/namenode/sps/SPSService.java|  10 +-
 .../namenode/sps/StoragePolicySatisfier.java| 137 ---
 .../sps/StoragePolicySatisfyManager.java| 399 +++
 .../sps/ExternalStoragePolicySatisfier.java |   2 +-
 .../hadoop/hdfs/tools/StoragePolicyAdmin.java   |   2 +-
 .../namenode/TestNameNodeReconfigure.java   |  19 +-
 .../TestPersistentStoragePolicySatisfier.java   |   3 +-
 .../TestStoragePolicySatisfierWithHA.java   |   6 +-
 .../sps/TestStoragePolicySatisfier.java |  35 +-
 ...stStoragePolicySatisfierWithStripedFile.java |   6 +-
 .../sps/TestExternalStoragePolicySatisfier.java |  24 +-
 33 files changed, 665 insertions(+), 604 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5b34b43/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
index 471ab2c..b6f9bdd 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
@@ -3110,8 +3110,8 @@ public class DFSClient implements java.io.Closeable, 
RemotePeerFactory,
 }
   }
 
-  public boolean isStoragePolicySatisfierRunning() throws IOException {
-return namenode.isStoragePolicySatisfierRunning();
+  public boolean isInternalSatisfierRunning() throws IOException {
+return namenode.isInternalSatisfierRunning();
   }
 
   Tracer getTracer() {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5b34b43/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
index 360fd63..5c51c22 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
@@ -1759,12 +1759,12 @@ public interface ClientProtocol {
   void satisfyStoragePolicy(String path) throws IOException;
 
   /**
-   * Check if StoragePolicySatisfier is running.
-   * @return true if StoragePolicySatisfier is running
+   * Check if internal StoragePolicySatisfier is running.
+   * @return true if internal StoragePolicySatisfier is running
* @throws IOException
*/
   @Idempotent
-  boolean isStoragePolicySatisfierRunning() throws IOException;
+  boolean 

[38/50] [abbrv] hadoop git commit: HDFS-13075. [SPS]: Provide External Context implementation. Contributed by Uma Maheswara Rao G.

2018-07-12 Thread rakeshr
HDFS-13075. [SPS]: Provide External Context implementation. Contributed by Uma 
Maheswara Rao G.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/659bfd70
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/659bfd70
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/659bfd70

Branch: refs/heads/HDFS-10285
Commit: 659bfd70d5b130084edb66231d339355a1c4e09c
Parents: 547537e
Author: Surendra Singh Lilhore 
Authored: Sun Jan 28 20:46:56 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:03:59 2018 +0530

--
 .../NamenodeProtocolServerSideTranslatorPB.java |  67 +
 .../NamenodeProtocolTranslatorPB.java   |  58 
 .../hdfs/server/balancer/NameNodeConnector.java |  28 +-
 .../server/blockmanagement/BlockManager.java|  19 ++
 .../server/blockmanagement/DatanodeManager.java |  18 ++
 .../hdfs/server/common/HdfsServerConstants.java |   3 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  53 +++-
 .../sps/BlockStorageMovementNeeded.java |   8 +-
 .../hdfs/server/namenode/sps/Context.java   |   9 +-
 .../namenode/sps/IntraSPSNameNodeContext.java   |  23 +-
 .../namenode/sps/StoragePolicySatisfier.java|  15 +-
 .../hdfs/server/protocol/NamenodeProtocol.java  |  46 +++-
 .../hdfs/server/sps/ExternalSPSContext.java | 271 +++
 .../src/main/proto/NamenodeProtocol.proto   |  57 
 .../sps/TestExternalStoragePolicySatisfier.java |  31 +--
 15 files changed, 652 insertions(+), 54 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/659bfd70/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
index 90c2c49..25eafdf 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java
@@ -23,6 +23,8 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo.DatanodeInfoBuilder;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.CheckDNSpaceResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto;
@@ -33,10 +35,16 @@ import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksReq
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetFilePathResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetNextSPSPathIdResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetTransactionIdResponseProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.HasLowRedundancyBlocksResponseProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.IsRollingUpgradeResponseProto;
 import 

[39/50] [abbrv] hadoop git commit: HDFS-13050: [SPS]: Create start/stop script to start external SPS process. Contributed by Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-13050: [SPS]: Create start/stop script to start external SPS process. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5213c199
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5213c199
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5213c199

Branch: refs/heads/HDFS-10285
Commit: 5213c199544b51522a7ca6e9812fa2c6d3b10ce2
Parents: 659bfd7
Author: Rakesh Radhakrishnan 
Authored: Mon Jan 29 03:10:48 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:04:07 2018 +0530

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   5 +
 .../server/blockmanagement/BlockManager.java|   9 ++
 .../apache/hadoop/hdfs/server/mover/Mover.java  |   2 +-
 .../hdfs/server/namenode/sps/Context.java   |   5 -
 .../namenode/sps/IntraSPSNameNodeContext.java   |   4 -
 .../sps/IntraSPSNameNodeFileIdCollector.java|  12 +-
 .../hdfs/server/namenode/sps/SPSPathIds.java|   1 +
 .../namenode/sps/StoragePolicySatisfier.java|  83 +++-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|   2 +-
 .../hdfs/server/sps/ExternalSPSContext.java |  57 +---
 .../server/sps/ExternalSPSFileIDCollector.java  |  12 +-
 .../sps/ExternalStoragePolicySatisfier.java | 130 +++
 .../src/site/markdown/ArchivalStorage.md|  10 +-
 .../sps/TestStoragePolicySatisfier.java |  22 ++--
 .../sps/TestExternalStoragePolicySatisfier.java |  33 +++--
 15 files changed, 259 insertions(+), 128 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5213c199/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index bc6e7a4..94426a5 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -63,6 +63,7 @@ function hadoop_usage
   hadoop_add_subcommand "secondarynamenode" daemon "run the DFS secondary namenode"
   hadoop_add_subcommand "snapshotDiff" client "diff two snapshots of a directory or diff the current directory contents with a snapshot"
   hadoop_add_subcommand "storagepolicies" admin "list/get/set/satisfyStoragePolicy block storage policies"
+  hadoop_add_subcommand "sps" daemon "run external storagepolicysatisfier"
   hadoop_add_subcommand "version" client "print the version"
   hadoop_add_subcommand "zkfc" daemon "run the ZK Failover Controller daemon"
   hadoop_generate_usage "${HADOOP_SHELL_EXECNAME}" false
@@ -201,6 +202,10 @@ function hdfscmd_case
 storagepolicies)
   HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.StoragePolicyAdmin
 ;;
+sps)
+  HADOOP_SUBCMD_SUPPORTDAEMONIZATION="true"
+  HADOOP_CLASSNAME=org.apache.hadoop.hdfs.server.sps.ExternalStoragePolicySatisfier
+;;
 version)
   HADOOP_CLASSNAME=org.apache.hadoop.util.VersionInfo
 ;;

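Since the new "sps" subcommand sets HADOOP_SUBCMD_SUPPORTDAEMONIZATION to
true, the external satisfier can presumably be managed like other Hadoop
daemons, along the lines of:

    hdfs --daemon start sps
    hdfs --daemon stop sps

Treat these invocations as illustrative; they follow the standard Hadoop
shell daemon conventions rather than text shown in the patch itself.
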
http://git-wip-us.apache.org/repos/asf/hadoop/blob/5213c199/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index daaa7a3..f348a33 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -94,6 +94,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import 
org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeBlockMoveTaskHandler;
+import org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeContext;
+import 
org.apache.hadoop.hdfs.server.namenode.sps.IntraSPSNameNodeFileIdCollector;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSPathIds;
 import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
 import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
@@ -5098,9 +5101,15 @@ public class BlockManager implements BlockStatsMXBean {
   return;
 }
 updateSPSMode(StoragePolicySatisfierMode.INTERNAL);
+sps.init(new IntraSPSNameNodeContext(this.namesystem, this, sps),
+new IntraSPSNameNodeFileIdCollector(this.namesystem.getFSDirectory(),
+sps),
+new 

[21/50] [abbrv] hadoop git commit: HDFS-12556: [SPS] : Block movement analysis should be done in read lock.

2018-07-12 Thread rakeshr
HDFS-12556: [SPS] : Block movement analysis should be done in read lock.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9edad140
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9edad140
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9edad140

Branch: refs/heads/HDFS-10285
Commit: 9edad140600d0692e2b432c8714f2125cd5525d1
Parents: 73a3c94
Author: Surendra Singh Lilhore 
Authored: Sat Oct 14 15:11:26 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:02:39 2018 +0530

--
 .../server/namenode/StoragePolicySatisfier.java | 27 +---
 .../TestPersistentStoragePolicySatisfier.java   |  2 +-
 2 files changed, 19 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edad140/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a28a806..cbfba44 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -242,12 +242,25 @@ public class StoragePolicySatisfier implements Runnable {
   ItemInfo itemInfo = storageMovementNeeded.get();
   if (itemInfo != null) {
 long trackId = itemInfo.getTrackId();
-BlockCollection blockCollection =
-namesystem.getBlockCollection(trackId);
-// Check blockCollectionId existence.
+BlockCollection blockCollection;
+BlocksMovingAnalysis status = null;
+try {
+  namesystem.readLock();
+  blockCollection = namesystem.getBlockCollection(trackId);
+  // Check blockCollectionId existence.
+  if (blockCollection == null) {
+// File doesn't exist (maybe it was deleted); remove trackId from
+// the queue
+storageMovementNeeded.removeItemTrackInfo(itemInfo);
+  } else {
+status =
+analyseBlocksStorageMovementsAndAssignToDN(
+blockCollection);
+  }
+} finally {
+  namesystem.readUnlock();
+}
 if (blockCollection != null) {
-  BlocksMovingAnalysis status =
-  analyseBlocksStorageMovementsAndAssignToDN(blockCollection);
   switch (status.status) {
   // Just add to monitor, so it will be retried after timeout
   case ANALYSIS_SKIPPED_FOR_RETRY:
@@ -283,10 +296,6 @@ public class StoragePolicySatisfier implements Runnable {
 storageMovementNeeded.removeItemTrackInfo(itemInfo);
 break;
   }
-} else {
-  // File doesn't exists (maybe got deleted), remove trackId from
-  // the queue
-  storageMovementNeeded.removeItemTrackInfo(itemInfo);
 }
   }
 }

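The reshaped loop above confines the analysis to a
namesystem.readLock()/readUnlock() window and only acts on the computed
status after the lock is released. A condensed, hypothetical sketch of that
shape using a plain ReentrantReadWriteLock (the patch uses the namesystem's
own lock; all names here are illustrative):

    import java.util.Map;
    import java.util.concurrent.locks.ReadWriteLock;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    class ReadLockAnalysisSketch {
      private final ReadWriteLock lock = new ReentrantReadWriteLock();

      // Decide under the read lock, release it, then let the caller act
      // on the returned status outside the lock.
      String analyse(Map<Long, String> blockCollections, long trackId) {
        String status = null;
        lock.readLock().lock();
        try {
          String bc = blockCollections.get(trackId);
          if (bc == null) {
            status = "REMOVED";        // file is gone, drop the track id
          } else {
            status = "ANALYSED:" + bc; // analysis computed while locked
          }
        } finally {
          lock.readLock().unlock();
        }
        return status;                 // follow-up work happens unlocked
      }
    }
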
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9edad140/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index 5bce296..7165d06 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -72,7 +72,7 @@ public class TestPersistentStoragePolicySatisfier {
   {StorageType.DISK, StorageType.ARCHIVE, StorageType.SSD}
   };
 
-  private final int timeout = 30;
+  private final int timeout = 9;
 
   /**
* Setup environment for every test case.





[30/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by 
Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9062df87
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9062df87
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9062df87

Branch: refs/heads/HDFS-10285
Commit: 9062df87976c5611afd4a8a64ef2bbe645d17cbf
Parents: d18053d
Author: Uma Maheswara Rao G 
Authored: Fri Dec 22 09:10:12 2017 -0800
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:03:12 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|6 +-
 .../BlockStorageMovementAttemptedItems.java |  241 ---
 .../namenode/BlockStorageMovementNeeded.java|  574 --
 .../hdfs/server/namenode/FSNamesystem.java  |1 +
 .../hdfs/server/namenode/IntraNNSPSContext.java |   41 +
 .../server/namenode/StoragePolicySatisfier.java |  973 --
 .../sps/BlockStorageMovementAttemptedItems.java |  241 +++
 .../sps/BlockStorageMovementNeeded.java |  572 ++
 .../namenode/sps/StoragePolicySatisfier.java|  988 ++
 .../hdfs/server/namenode/sps/package-info.java  |   28 +
 .../TestBlockStorageMovementAttemptedItems.java |  196 --
 .../namenode/TestStoragePolicySatisfier.java| 1775 -
 ...stStoragePolicySatisfierWithStripedFile.java |  580 --
 .../TestBlockStorageMovementAttemptedItems.java |  196 ++
 .../sps/TestStoragePolicySatisfier.java | 1779 ++
 ...stStoragePolicySatisfierWithStripedFile.java |  580 ++
 16 files changed, 4430 insertions(+), 4341 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9062df87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index c81ed6c..1cf687e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,11 +89,12 @@ import 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.namenode.CachedBlock;
 import org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
+import org.apache.hadoop.hdfs.server.namenode.IntraNNSPSContext;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
+import org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -478,7 +479,8 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-sps = new StoragePolicySatisfier(namesystem, this, conf);
+StoragePolicySatisfier.Context spsctxt = new IntraNNSPSContext(namesystem);
+sps = new StoragePolicySatisfier(namesystem, this, conf, spsctxt);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/9062df87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
deleted file mode 100644
index 643255f..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ /dev/null
@@ -1,241 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work 

[47/50] [abbrv] hadoop git commit: HDFS-13165: [SPS]: Collects successfully moved block details via IBR. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-13165: [SPS]: Collects successfully moved block details via IBR. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/02ebf5d4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/02ebf5d4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/02ebf5d4

Branch: refs/heads/HDFS-10285
Commit: 02ebf5d4a84030ceccc41f466a76c361e7ccba32
Parents: dc52bd4
Author: Rakesh Radhakrishnan 
Authored: Sun Apr 29 11:06:59 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:04:47 2018 +0530

--
 .../DatanodeProtocolClientSideTranslatorPB.java |  11 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java |  25 ---
 .../server/blockmanagement/BlockManager.java|  86 +-
 .../sps/BlockMovementAttemptFinished.java   |  24 ++-
 .../common/sps/BlockStorageMovementTracker.java | 109 +---
 .../sps/BlocksMovementsStatusHandler.java   |  70 +---
 .../hdfs/server/datanode/BPServiceActor.java|  14 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   7 +-
 .../datanode/StoragePolicySatisfyWorker.java|  48 ++
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  13 +-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   8 +-
 .../hdfs/server/namenode/FSDirectory.java   |   5 +-
 .../hdfs/server/namenode/FSNamesystem.java  |  30 ++--
 .../hadoop/hdfs/server/namenode/NameNode.java   |  19 ++-
 .../hdfs/server/namenode/NameNodeRpcServer.java |  46 +++--
 .../sps/BlockStorageMovementAttemptedItems.java | 167 +--
 .../hdfs/server/namenode/sps/SPSService.java|  19 ++-
 .../namenode/sps/StoragePolicySatisfier.java| 154 +++--
 .../hdfs/server/protocol/DatanodeProtocol.java  |   4 +-
 .../sps/ExternalSPSBlockMoveTaskHandler.java|  32 ++--
 .../sps/ExternalStoragePolicySatisfier.java |   3 +-
 .../src/main/proto/DatanodeProtocol.proto   |   9 -
 .../src/main/resources/hdfs-default.xml |  41 +
 .../TestNameNodePrunesMissingStorages.java  |   4 +-
 .../datanode/InternalDataNodeTestUtils.java |   4 +-
 .../SimpleBlocksMovementsStatusHandler.java |  88 ++
 .../server/datanode/TestBPOfferService.java |  12 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   7 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../TestStoragePolicySatisfyWorker.java |  76 +
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   9 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   4 +-
 .../hdfs/server/namenode/TestDeadDatanode.java  |   5 +-
 .../namenode/TestNameNodeReconfigure.java   |  17 +-
 .../TestBlockStorageMovementAttemptedItems.java |  88 ++
 .../sps/TestStoragePolicySatisfier.java |  73 ++--
 ...stStoragePolicySatisfierWithStripedFile.java |  40 +++--
 .../sps/TestExternalStoragePolicySatisfier.java |  44 ++---
 42 files changed, 776 insertions(+), 659 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/02ebf5d4/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
index dcc0705..e4125dc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java
@@ -48,7 +48,6 @@ import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageBlock
 import 
org.apache.hadoop.hdfs.protocol.proto.DatanodeProtocolProtos.StorageReceivedDeletedBlocksProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.HdfsServerProtos.VersionRequestProto;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
@@ -139,8 +138,7 @@ public class DatanodeProtocolClientSideTranslatorPB 
implements
   VolumeFailureSummary volumeFailureSummary,
   boolean requestFullBlockReportLease,
   @Nonnull 

[10/50] [abbrv] hadoop git commit: HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running together. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-11264: [SPS]: Double checks to ensure that SPS/Mover are not running 
together. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b43972ae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b43972ae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b43972ae

Branch: refs/heads/HDFS-10285
Commit: b43972ae5dd955bbe9450ca0116a14853d41d809
Parents: 55f32ce
Author: Uma Maheswara Rao G 
Authored: Wed Jul 12 17:56:56 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:01:28 2018 +0530

--
 .../server/namenode/StoragePolicySatisfier.java | 53 +++-
 .../namenode/TestStoragePolicySatisfier.java|  3 +-
 ...stStoragePolicySatisfierWithStripedFile.java |  5 +-
 3 files changed, 34 insertions(+), 27 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b43972ae/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 97cbf1b..00b4cd0 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -128,6 +128,14 @@ public class StoragePolicySatisfier implements Runnable {
*/
   public synchronized void start(boolean reconfigStart) {
 isRunning = true;
+if (checkIfMoverRunning()) {
+  isRunning = false;
+  LOG.error(
+  "Stopping StoragePolicySatisfier thread " + "as Mover ID file "
+  + HdfsServerConstants.MOVER_ID_PATH.toString()
+  + " has been opened. Maybe a Mover instance is running!");
+  return;
+}
 if (reconfigStart) {
   LOG.info("Starting StoragePolicySatisfier, as admin requested to "
   + "activate it.");
@@ -211,20 +219,6 @@ public class StoragePolicySatisfier implements Runnable {
 
   @Override
   public void run() {
-boolean isMoverRunning = !checkIfMoverRunning();
-synchronized (this) {
-  isRunning = isMoverRunning;
-  if (!isRunning) {
-// Stopping monitor thread and clearing queues as well
-this.clearQueues();
-this.storageMovementsMonitor.stopGracefully();
-LOG.error(
-"Stopping StoragePolicySatisfier thread " + "as Mover ID file "
-+ HdfsServerConstants.MOVER_ID_PATH.toString()
-+ " been opened. Maybe a Mover instance is running!");
-return;
-  }
-}
 while (namesystem.isRunning() && isRunning) {
   try {
 if (!namesystem.isInSafeMode()) {
@@ -274,25 +268,34 @@ public class StoragePolicySatisfier implements Runnable {
 // we want to check block movements.
 Thread.sleep(3000);
   } catch (Throwable t) {
-synchronized (this) {
+handleException(t);
+  }
+}
+  }
+
+  private void handleException(Throwable t) {
+// double check to avoid entering into synchronized block.
+if (isRunning) {
+  synchronized (this) {
+if (isRunning) {
   isRunning = false;
   // Stopping monitor thread and clearing queues as well
   this.clearQueues();
   this.storageMovementsMonitor.stopGracefully();
-}
-if (!namesystem.isRunning()) {
-  LOG.info("Stopping StoragePolicySatisfier.");
-  if (!(t instanceof InterruptedException)) {
-LOG.info("StoragePolicySatisfier received an exception"
-+ " while shutting down.", t);
+  if (!namesystem.isRunning()) {
+LOG.info("Stopping StoragePolicySatisfier.");
+if (!(t instanceof InterruptedException)) {
+  LOG.info("StoragePolicySatisfier received an exception"
+  + " while shutting down.", t);
+}
+return;
   }
-  break;
 }
-LOG.error("StoragePolicySatisfier thread received runtime exception. "
-+ "Stopping Storage policy satisfier work", t);
-break;
   }
 }
+LOG.error("StoragePolicySatisfier thread received runtime exception. "
++ "Stopping Storage policy satisfier work", t);
+return;
   }
 
  private BlocksMovingAnalysisStatus analyseBlocksStorageMovementsAndAssignToDN(


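Stepping back from the hunks above: the change replaces an unconditional startup with a Mover-ID guard in start() and folds the failure path into a double-checked handleException(). A minimal, self-contained sketch of that shape follows; the names (GuardedSatisfier, moverLockHeld) are hypothetical stand-ins, not the HDFS APIs.

// Sketch only: the guard-on-start plus double-checked-stop pattern of this
// commit, reduced to plain Java.
public class GuardedSatisfier implements Runnable {
  private volatile boolean isRunning;

  // Stand-in for checkIfMoverRunning(): true while the Mover ID file is open.
  private boolean moverLockHeld() {
    return false;
  }

  public synchronized void start() {
    isRunning = true;
    if (moverLockHeld()) {           // refuse to start alongside a Mover
      isRunning = false;
      return;
    }
    new Thread(this, "satisfier").start();
  }

  public synchronized void stop() {
    isRunning = false;
  }

  @Override
  public void run() {
    while (isRunning) {
      try {
        Thread.sleep(3000);          // placeholder for one scheduling pass
      } catch (Throwable t) {
        handleException(t);
      }
    }
  }

  private void handleException(Throwable t) {
    // First unsynchronized read skips the monitor on the common path; the
    // second read under the lock makes the shutdown decision exactly once.
    if (isRunning) {
      synchronized (this) {
        if (isRunning) {
          isRunning = false;         // clear queues / stop monitors here
        }
      }
    }
  }

  public static void main(String[] args) throws InterruptedException {
    GuardedSatisfier s = new GuardedSatisfier();
    s.start();
    Thread.sleep(100);
    s.stop();
  }
}

Because isRunning is volatile, the racy first check can never observe a permanently stale value; it only saves monitor acquisition on every exception.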
[43/50] [abbrv] hadoop git commit: HDFS-13110: [SPS]: Reduce the number of APIs in NamenodeProtocol used by external satisfier. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/538b9bd9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
deleted file mode 100644
index ff277ba..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/sps/ExternalSPSFileIDCollector.java
+++ /dev/null
@@ -1,174 +0,0 @@
-package org.apache.hadoop.hdfs.server.sps;
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-import java.io.IOException;
-import java.util.ArrayList;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.protocol.DirectoryListing;
-import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.server.namenode.sps.Context;
-import org.apache.hadoop.hdfs.server.namenode.sps.FileIdCollector;
-import org.apache.hadoop.hdfs.server.namenode.sps.ItemInfo;
-import org.apache.hadoop.hdfs.server.namenode.sps.SPSService;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-/**
- * This class is to scan the paths recursively. If file is directory, then it
- * will scan for files recursively. If the file is non directory, then it will
- * just submit the same file to process.
- */
-@InterfaceAudience.Private
-public class ExternalSPSFileIDCollector implements FileIdCollector {
-  public static final Logger LOG =
-  LoggerFactory.getLogger(ExternalSPSFileIDCollector.class);
-  private Context cxt;
-  private DistributedFileSystem dfs;
-  private SPSService service;
-  private int maxQueueLimitToScan;
-
-  public ExternalSPSFileIDCollector(Context cxt, SPSService service) {
-this.cxt = cxt;
-this.service = service;
-this.maxQueueLimitToScan = service.getConf().getInt(
-DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY,
-DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_DEFAULT);
-try {
-  // TODO: probably we could get this dfs from external context? but this 
is
-  // too specific to external.
-  dfs = getFS(service.getConf());
-} catch (IOException e) {
-  LOG.error("Unable to get the filesystem. Make sure Namenode running and "
-  + "configured namenode address is correct.", e);
-}
-  }
-
-  private DistributedFileSystem getFS(Configuration conf) throws IOException {
-return (DistributedFileSystem) FileSystem
-.get(FileSystem.getDefaultUri(conf), conf);
-  }
-
-  /**
-   * Recursively scan the given path and add the file info to SPS service for
-   * processing.
-   */
-  private long processPath(long startID, String fullPath) {
-long pendingWorkCount = 0; // to be satisfied file counter
-for (byte[] lastReturnedName = HdfsFileStatus.EMPTY_NAME;;) {
-  final DirectoryListing children;
-  try {
-children = dfs.getClient().listPaths(fullPath, lastReturnedName, false);
-  } catch (IOException e) {
-LOG.warn("Failed to list directory " + fullPath
-+ ". Ignore the directory and continue.", e);
-return pendingWorkCount;
-  }
-  if (children == null) {
-if (LOG.isDebugEnabled()) {
-  LOG.debug("The scanning start dir/sub dir " + fullPath
-  + " does not have childrens.");
-}
-return pendingWorkCount;
-  }
-
-  for (HdfsFileStatus child : children.getPartialListing()) {
-if (child.isFile()) {
-  service.addFileIdToProcess(new ItemInfo(startID, child.getFileId()),
-  false);
-  checkProcessingQueuesFree();
-  pendingWorkCount++; // increment to be satisfied file count
-} else {
-  String fullPathStr = child.getFullName(fullPath);
- 

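The processPath() loop in the file being deleted above is a cursor-style paginated traversal: listPaths is called repeatedly with the last returned name, so each call resumes where the previous page ended. A stripped-down sketch of that loop, with Pager and Page as hypothetical stand-ins for DFSClient.listPaths and DirectoryListing:

import java.util.Arrays;
import java.util.List;

class PagedScan {
  // Hypothetical stand-in for DFSClient.listPaths / DirectoryListing.
  interface Pager {
    Page list(String dir, String startAfter);
  }

  static final class Page {
    final List<String> names;       // analogous to getPartialListing()
    final boolean hasMore;
    Page(List<String> names, boolean hasMore) {
      this.names = names;
      this.hasMore = hasMore;
    }
  }

  static long scan(Pager pager, String dir) {
    long pending = 0;
    String cursor = "";             // analogous to HdfsFileStatus.EMPTY_NAME
    while (true) {
      Page page = pager.list(dir, cursor);
      for (String name : page.names) {
        pending++;                  // submit the entry for processing here
        cursor = name;              // remember the last returned name
      }
      if (!page.hasMore) {
        return pending;             // listing exhausted
      }
    }
  }

  public static void main(String[] args) {
    // Two-page toy listing to exercise the loop.
    Pager toy = (dir, after) -> after.isEmpty()
        ? new Page(Arrays.asList("a", "b"), true)
        : new Page(Arrays.asList("c"), false);
    System.out.println(scan(toy, "/dir"));  // prints 3
  }
}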
[07/50] [abbrv] hadoop git commit: HDFS-11966. [SPS] Correct the log in BlockStorageMovementAttemptedItems#blockStorageMovementResultCheck. Contributed by Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-11966. [SPS] Correct the log in 
BlockStorageMovementAttemptedItems#blockStorageMovementResultCheck. Contributed 
by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4274dcc9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4274dcc9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4274dcc9

Branch: refs/heads/HDFS-10285
Commit: 4274dcc9eac4cd6e6559d27f27901a0bfdbae987
Parents: 0b483da
Author: Rakesh Radhakrishnan 
Authored: Sun Jun 18 11:00:28 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:01:04 2018 +0530

--
 .../BlockStorageMovementAttemptedItems.java | 39 ++--
 1 file changed, 20 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4274dcc9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
index bf7859c..6048986 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
@@ -296,19 +296,17 @@ public class BlockStorageMovementAttemptedItems {
 .next();
 synchronized (storageMovementAttemptedItems) {
   Status status = storageMovementAttemptedResult.getStatus();
+  long trackId = storageMovementAttemptedResult.getTrackId();
   ItemInfo itemInfo;
   switch (status) {
   case FAILURE:
-blockStorageMovementNeeded
-.add(storageMovementAttemptedResult.getTrackId());
+blockStorageMovementNeeded.add(trackId);
 LOG.warn("Blocks storage movement results for the tracking id: {}"
 + " is reported from co-ordinating datanode, but result"
-+ " status is FAILURE. So, added for retry",
-storageMovementAttemptedResult.getTrackId());
++ " status is FAILURE. So, added for retry", trackId);
 break;
   case SUCCESS:
-itemInfo = storageMovementAttemptedItems
-.get(storageMovementAttemptedResult.getTrackId());
+itemInfo = storageMovementAttemptedItems.get(trackId);
 
 // ItemInfo could be null. One case is, before the blocks movements
 // result arrives the attempted trackID became timed out and then
@@ -318,20 +316,23 @@ public class BlockStorageMovementAttemptedItems {
 // following condition. If all the block locations under the trackID
 // are attempted and failed to find matching target nodes to satisfy
 // storage policy in previous SPS iteration.
-if (itemInfo != null
-&& !itemInfo.isAllBlockLocsAttemptedToSatisfy()) {
-  blockStorageMovementNeeded
-  .add(storageMovementAttemptedResult.getTrackId());
-  LOG.warn("Blocks storage movement is SUCCESS for the track id: {}"
-  + " reported from co-ordinating datanode. But adding trackID"
-  + " back to retry queue as some of the blocks couldn't find"
-  + " matching target nodes in previous SPS iteration.",
-  storageMovementAttemptedResult.getTrackId());
+String msg = "Blocks storage movement is SUCCESS for the track id: "
++ trackId + " reported from co-ordinating datanode.";
+if (itemInfo != null) {
+  if (!itemInfo.isAllBlockLocsAttemptedToSatisfy()) {
+blockStorageMovementNeeded.add(trackId);
+LOG.warn("{} But adding trackID back to retry queue as some of"
++ " the blocks couldn't find matching target nodes in"
++ " previous SPS iteration.", msg);
+  } else {
+LOG.info(msg);
+// Remove xattr for the track id.
+this.sps.postBlkStorageMovementCleanup(
+storageMovementAttemptedResult.getTrackId());
+  }
 } else {
-  LOG.info("Blocks storage movement is SUCCESS for the track id: {}"
-  + " reported from co-ordinating datanode. But the trackID "
-  + "doesn't exists in storageMovementAttemptedItems list",
-  

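The substance of the fix above: the SUCCESS branch previously logged a retry warning even when the trackID was simply finished or unknown. The rewrite computes the shared message prefix once and picks the right level per case. A condensed sketch, with the itemInfo handling simplified to a nullable flag:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class ResultCheckSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(ResultCheckSketch.class);

  // allBlocksAttempted == null models a trackId with no attempted-items entry.
  void onSuccess(long trackId, Boolean allBlocksAttempted) {
    String msg = "Blocks storage movement is SUCCESS for the track id: "
        + trackId + " reported from co-ordinating datanode.";
    if (allBlocksAttempted != null) {
      if (!allBlocksAttempted) {
        // Some block locations never found targets: queue the id for retry.
        LOG.warn("{} But adding trackID back to retry queue.", msg);
      } else {
        LOG.info(msg);              // clean completion; clean up the xattr here
      }
    } else {
      LOG.info("{} But the trackID doesn't exist in the attempted-items list.",
          msg);
    }
  }
}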
[16/50] [abbrv] hadoop git commit: HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. Contributed by Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-12225: [SPS]: Optimize extended attributes for tracking SPS movements. 
Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/60e9c157
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/60e9c157
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/60e9c157

Branch: refs/heads/HDFS-10285
Commit: 60e9c1572ba87677f947b79236165c5b2e8ffa0c
Parents: 544eb25
Author: Uma Maheswara Rao G 
Authored: Wed Aug 23 15:37:03 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:02:15 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  21 +-
 .../server/blockmanagement/DatanodeManager.java |  14 +-
 .../hdfs/server/datanode/BPOfferService.java|   1 +
 .../BlockStorageMovementAttemptedItems.java |  95 +---
 .../namenode/BlockStorageMovementNeeded.java| 233 ++-
 .../namenode/FSDirSatisfyStoragePolicyOp.java   |  91 +++-
 .../hdfs/server/namenode/FSDirXAttrOp.java  |  11 +-
 .../hdfs/server/namenode/FSDirectory.java   |   2 +-
 .../hdfs/server/namenode/FSNamesystem.java  |   2 +-
 .../server/namenode/StoragePolicySatisfier.java | 108 ++---
 .../TestStoragePolicySatisfyWorker.java |   5 +-
 .../TestBlockStorageMovementAttemptedItems.java |  34 +--
 .../TestPersistentStoragePolicySatisfier.java   | 104 +
 .../namenode/TestStoragePolicySatisfier.java| 127 +-
 14 files changed, 589 insertions(+), 259 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/60e9c157/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index a442a92..0ee558a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,7 +89,6 @@ import 
org.apache.hadoop.hdfs.server.namenode.INode.BlocksMapUpdateInfo;
 import org.apache.hadoop.hdfs.server.namenode.INodesInPath;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.Namesystem;
-import org.apache.hadoop.hdfs.server.namenode.BlockStorageMovementNeeded;
 import org.apache.hadoop.hdfs.server.namenode.StoragePolicySatisfier;
 import org.apache.hadoop.hdfs.server.namenode.ha.HAContext;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
@@ -431,9 +430,6 @@ public class BlockManager implements BlockStatsMXBean {
   private final StoragePolicySatisfier sps;
   private final boolean storagePolicyEnabled;
   private boolean spsEnabled;
-  private final BlockStorageMovementNeeded storageMovementNeeded =
-  new BlockStorageMovementNeeded();
-
   /** Minimum live replicas needed for the datanode to be transitioned
* from ENTERING_MAINTENANCE to IN_MAINTENANCE.
*/
@@ -480,8 +476,7 @@ public class BlockManager implements BlockStatsMXBean {
 conf.getBoolean(
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT);
-sps = new StoragePolicySatisfier(namesystem, storageMovementNeeded, this,
-conf);
+sps = new StoragePolicySatisfier(namesystem, this, conf);
 blockTokenSecretManager = createBlockTokenSecretManager(conf);
 
 providedStorageMap = new ProvidedStorageMap(namesystem, this, conf);
@@ -5009,20 +5004,6 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
-   * Set file block collection for which storage movement needed for its blocks.
-   *
-   * @param id
-   *  - file block collection id.
-   */
-  public void satisfyStoragePolicy(long id) {
-storageMovementNeeded.add(id);
-if (LOG.isDebugEnabled()) {
-  LOG.debug("Added block collection id {} to block "
-  + "storageMovementNeeded queue", id);
-}
-  }
-
-  /**
* Gets the storage policy satisfier instance.
*
* @return sps

http://git-wip-us.apache.org/repos/asf/hadoop/blob/60e9c157/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
 

[19/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/73a3c94c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index d3c5cb1..2f621e6 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -156,7 +156,7 @@ import 
org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
 import org.apache.hadoop.hdfs.server.namenode.NameNode.OperationCategory;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.BlockReportContext;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -1517,14 +1517,15 @@ public class NameNodeRpcServer implements NamenodeProtocols {
   boolean requestFullBlockReportLease,
   @Nonnull SlowPeerReports slowPeers,
   @Nonnull SlowDiskReports slowDisks,
-  BlocksStorageMovementResult[] blkMovementStatus) throws IOException {
+  BlocksStorageMoveAttemptFinished storageMovementFinishedBlks)
+  throws IOException {
 checkNNStartup();
 verifyRequest(nodeReg);
 return namesystem.handleHeartbeat(nodeReg, report,
 dnCacheCapacity, dnCacheUsed, xceiverCount, xmitsInProgress,
 failedVolumes, volumeFailureSummary, requestFullBlockReportLease,
 slowPeers, slowDisks,
-blkMovementStatus);
+storageMovementFinishedBlks);
   }
 
   @Override // DatanodeProtocol

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73a3c94c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index a4372d5..a28a806 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import static org.apache.hadoop.util.Time.monotonicNow;
+
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collections;
@@ -44,7 +46,7 @@ import 
org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
 import 
org.apache.hadoop.hdfs.server.protocol.BlockStorageMovementCommand.BlockMovingInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMovementResult;
+import org.apache.hadoop.hdfs.server.protocol.BlocksStorageMoveAttemptFinished;
 import org.apache.hadoop.hdfs.server.protocol.StorageReport;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
 import org.apache.hadoop.util.Daemon;
@@ -82,25 +84,38 @@ public class StoragePolicySatisfier implements Runnable {
   /**
* Represents the collective analysis status for all blocks.
*/
-  private enum BlocksMovingAnalysisStatus {
-// Represents that, the analysis skipped due to some conditions. A such
-// condition is if block collection is in incomplete state.
-ANALYSIS_SKIPPED_FOR_RETRY,
-// Represents that, all block storage movement needed blocks found its
-// targets.
-ALL_BLOCKS_TARGETS_PAIRED,
-// Represents that, only fewer or none of the block storage movement needed
-// block found its eligible targets.
-FEW_BLOCKS_TARGETS_PAIRED,
-// Represents that, none of the blocks found for block storage movements.
-BLOCKS_ALREADY_SATISFIED,
-// Represents that, the analysis skipped due to some conditions.
-// Example conditions are if no blocks really exists in block collection or
-// if analysis is not required on ec files with unsuitable storage policies
-BLOCKS_TARGET_PAIRING_SKIPPED,
-// Represents that, All the reported blocks are satisfied the policy but
-// some of the blocks are low redundant.
-FEW_LOW_REDUNDANCY_BLOCKS
+  private 

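The hunk is cut off mid-replacement, but the enum being removed is worth keeping in view, since the refactor folds the same outcomes into a richer type. Its values, restated compactly (comments paraphrase the originals):

enum BlocksMovingAnalysisStatus {
  ANALYSIS_SKIPPED_FOR_RETRY,     // e.g. the block collection is incomplete
  ALL_BLOCKS_TARGETS_PAIRED,      // every block needing movement found a target
  FEW_BLOCKS_TARGETS_PAIRED,      // only some (or no) blocks found targets
  BLOCKS_ALREADY_SATISFIED,       // no blocks need moving
  BLOCKS_TARGET_PAIRING_SKIPPED,  // e.g. no blocks, or EC file with unsuitable policy
  FEW_LOW_REDUNDANCY_BLOCKS       // policy satisfied, but some blocks low-redundant
}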
[50/50] [abbrv] hadoop git commit: HDFS-13076: [SPS]: Resolve conflicts after rebasing HDFS-10285 branch to trunk. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-13076: [SPS]: Resolve conflicts after rebasing HDFS-10285 branch to trunk. 
Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6a1e5ab
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6a1e5ab
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6a1e5ab

Branch: refs/heads/HDFS-10285
Commit: c6a1e5ab46c096637bd0be60bfcfaa8f0c30d759
Parents: 24bcc8c
Author: Rakesh Radhakrishnan 
Authored: Thu Jul 5 10:10:13 2018 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:05:03 2018 +0530

--
 .../hdfs/server/federation/router/RouterNamenodeProtocol.java | 6 ++
 .../hadoop/hdfs/server/federation/router/RouterRpcServer.java | 7 +++
 .../hadoop/hdfs/server/blockmanagement/BlockManager.java  | 2 +-
 .../server/namenode/sps/IntraSPSNameNodeFileIdCollector.java  | 4 ++--
 4 files changed, 16 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a1e5ab/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
index 0433650..edfb391 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterNamenodeProtocol.java
@@ -184,4 +184,10 @@ public class RouterNamenodeProtocol implements NamenodeProtocol {
 rpcServer.checkOperation(OperationCategory.READ, false);
 return false;
   }
+
+  @Override
+  public Long getNextSPSPath() throws IOException {
+// not supported
+return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a1e5ab/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
index c8a5484..426bf6c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-rbf/src/main/java/org/apache/hadoop/hdfs/server/federation/router/RouterRpcServer.java
@@ -2531,4 +2531,11 @@ public class RouterRpcServer extends AbstractService
 checkOperation(OperationCategory.READ, false);
 return StoragePolicySatisfyPathStatus.NOT_AVAILABLE;
   }
+
+  @Override
+  public Long getNextSPSPath() throws IOException {
+checkOperation(OperationCategory.READ, false);
+// not supported
+return null;
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a1e5ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index cb0de67..94ada2e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -5070,7 +5070,7 @@ public class BlockManager implements BlockStatsMXBean {
 DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY,
 DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_DEFAULT);
 String modeVal = spsMode;
-if (org.apache.commons.lang.StringUtils.isBlank(modeVal)) {
+if (org.apache.commons.lang3.StringUtils.isBlank(modeVal)) {
   modeVal = conf.get(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
   DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
 }

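The one-line change above is part of the commons-lang to commons-lang3 migration; isBlank behaves identically in both, treating null, empty, and whitespace-only strings as blank:

import org.apache.commons.lang3.StringUtils;

class BlankCheck {
  public static void main(String[] args) {
    System.out.println(StringUtils.isBlank(null));       // true
    System.out.println(StringUtils.isBlank(""));         // true
    System.out.println(StringUtils.isBlank("   "));      // true
    System.out.println(StringUtils.isBlank("internal")); // false
  }
}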
http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6a1e5ab/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/IntraSPSNameNodeFileIdCollector.java
--
diff --git 

[13/50] [abbrv] hadoop git commit: HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-12141: [SPS]: Fix checkstyle warnings. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2eb29c21
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2eb29c21
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2eb29c21

Branch: refs/heads/HDFS-10285
Commit: 2eb29c214a593fbf94e7c777fe5287d8ae3ece0a
Parents: ac8f8e8
Author: Uma Maheswara Rao G 
Authored: Mon Jul 17 10:24:06 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:01:52 2018 +0530

--
 .../hdfs/server/blockmanagement/BlockManager.java   |  2 +-
 .../server/datanode/StoragePolicySatisfyWorker.java |  6 +++---
 .../hdfs/server/namenode/StoragePolicySatisfier.java|  6 +++---
 .../hadoop/hdfs/server/protocol/DatanodeProtocol.java   |  5 ++---
 .../org/apache/hadoop/hdfs/server/mover/TestMover.java  |  7 ---
 .../server/namenode/TestStoragePolicySatisfier.java | 12 ++--
 6 files changed, 19 insertions(+), 19 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2eb29c21/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index ddf3f6c..5db2c3a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -427,7 +427,7 @@ public class BlockManager implements BlockStatsMXBean {
 
   private final BlockIdManager blockIdManager;
 
-  /** For satisfying block storage policies */
+  /** For satisfying block storage policies. */
   private final StoragePolicySatisfier sps;
   private final BlockStorageMovementNeeded storageMovementNeeded =
   new BlockStorageMovementNeeded();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2eb29c21/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
index f4f97dd..196cd58 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/StoragePolicySatisfyWorker.java
@@ -329,7 +329,7 @@ public class StoragePolicySatisfyWorker {
   /**
* Block movement status code.
*/
-  public static enum BlockMovementStatus {
+  public enum BlockMovementStatus {
 /** Success. */
 DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
 /**
@@ -343,7 +343,7 @@ public class StoragePolicySatisfyWorker {
 
 private final int code;
 
-private BlockMovementStatus(int code) {
+BlockMovementStatus(int code) {
   this.code = code;
 }
 
@@ -365,7 +365,7 @@ public class StoragePolicySatisfyWorker {
 private final DatanodeInfo target;
 private final BlockMovementStatus status;
 
-public BlockMovementResult(long trackId, long blockId,
+BlockMovementResult(long trackId, long blockId,
 DatanodeInfo target, BlockMovementStatus status) {
   this.trackId = trackId;
   this.blockId = blockId;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2eb29c21/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 00b4cd0..af3b7f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -714,10 +714,10 @@ public class StoragePolicySatisfier implements Runnable {
   }
 
   private static class StorageTypeNodePair {
-public StorageType storageType = null;
-public DatanodeDescriptor dn = null;
+private StorageType storageType = null;
+private 

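For reference, the checkstyle rules behind the enum edits above: an enum constructor is implicitly private, and a nested enum is implicitly static, so both explicit modifiers are redundant. A minimal compilable illustration:

public class StatusCodes {
  public enum BlockMovementStatus {        // was: public static enum
    DN_BLK_STORAGE_MOVEMENT_SUCCESS(0),
    DN_BLK_STORAGE_MOVEMENT_FAILURE(-1);

    private final int code;

    BlockMovementStatus(int code) {        // was: private BlockMovementStatus(...)
      this.code = code;
    }

    public int getCode() {
      return code;
    }
  }

  public static void main(String[] args) {
    System.out.println(
        BlockMovementStatus.DN_BLK_STORAGE_MOVEMENT_SUCCESS.getCode());
  }
}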
[29/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9062df87/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
new file mode 100644
index 000..5635621
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/sps/BlockStorageMovementNeeded.java
@@ -0,0 +1,572 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Map.Entry;
+import java.util.Queue;
+import java.util.concurrent.ConcurrentHashMap;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
+import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.hdfs.server.namenode.Namesystem;
+import 
org.apache.hadoop.hdfs.server.namenode.sps.StoragePolicySatisfier.ItemInfo;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser.TraverseInfo;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.Time;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * A Class to track the block collection IDs (Inode's ID) for which physical
+ * storage movement needed as per the Namespace and StorageReports from DN.
+ * It scan the pending directories for which storage movement is required and
+ * schedule the block collection IDs for movement. It track the info of
+ * scheduled items and remove the SPS xAttr from the file/Directory once
+ * movement is success.
+ */
+@InterfaceAudience.Private
+public class BlockStorageMovementNeeded {
+
+  public static final Logger LOG =
+  LoggerFactory.getLogger(BlockStorageMovementNeeded.class);
+
+  private final Queue storageMovementNeeded =
+  new LinkedList();
+
+  /**
+   * Map of startId and number of child's. Number of child's indicate the
+   * number of files pending to satisfy the policy.
+   */
+  private final Map pendingWorkForDirectory =
+  new HashMap();
+
+  private final Map spsStatus =
+  new ConcurrentHashMap<>();
+
+  private final Namesystem namesystem;
+
+  // List of pending dir to satisfy the policy
+  private final Queue spsDirsToBeTraveresed = new LinkedList();
+
+  private final StoragePolicySatisfier sps;
+
+  private Daemon inodeIdCollector;
+
+  private final int maxQueuedItem;
+
+  // Amount of time to cache the SUCCESS status of path before turning it to
+  // NOT_AVAILABLE.
+  private static long statusClearanceElapsedTimeMs = 30;
+
+  public BlockStorageMovementNeeded(Namesystem namesystem,
+  StoragePolicySatisfier sps, int queueLimit) {
+this.namesystem = namesystem;
+this.sps = sps;
+this.maxQueuedItem = queueLimit;
+  }
+
+  /**
+   * Add the candidate to tracking list for which storage movement
+   * expected if necessary.
+   *
+   * @param trackInfo
+   *  - track info for satisfy the policy
+   */
+  public synchronized void add(ItemInfo trackInfo) {
+spsStatus.put(trackInfo.getStartId(),
+new StoragePolicySatisfyPathStatusInfo(
+StoragePolicySatisfyPathStatus.IN_PROGRESS));
+storageMovementNeeded.add(trackInfo);
+  }
+
+  /**
+   * Add the itemInfo to tracking list for which storage movement
+   * expected if necessary.
+   * @param startId
+   *- start id
+  

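The message is truncated, but the core bookkeeping of BlockStorageMovementNeeded is visible above: a FIFO of ItemInfo plus a concurrent status map keyed by start id. A condensed sketch of just that state, using simplified stand-in types rather than the HDFS ones:

import java.util.LinkedList;
import java.util.Map;
import java.util.Queue;
import java.util.concurrent.ConcurrentHashMap;

class MovementNeededSketch {
  enum PathStatus { IN_PROGRESS, SUCCESS, NOT_AVAILABLE }

  // Simplified stand-in for StoragePolicySatisfier.ItemInfo.
  static final class ItemInfo {
    final long startId;
    final long fileId;
    ItemInfo(long startId, long fileId) {
      this.startId = startId;
      this.fileId = fileId;
    }
  }

  private final Queue<ItemInfo> storageMovementNeeded = new LinkedList<>();
  private final Map<Long, PathStatus> spsStatus = new ConcurrentHashMap<>();

  // Mirrors add(ItemInfo): mark the start id in progress, then enqueue.
  synchronized void add(ItemInfo info) {
    spsStatus.put(info.startId, PathStatus.IN_PROGRESS);
    storageMovementNeeded.add(info);
  }

  synchronized ItemInfo next() {
    return storageMovementNeeded.poll();
  }

  PathStatus statusOf(long startId) {
    return spsStatus.getOrDefault(startId, PathStatus.NOT_AVAILABLE);
  }
}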
[05/50] [abbrv] hadoop git commit: HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-11762. [SPS]: Empty files should be ignored in StoragePolicySatisfier. 
Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ab0f4123
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ab0f4123
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ab0f4123

Branch: refs/heads/HDFS-10285
Commit: ab0f412387937c9bd1468beed6a4f3c495457181
Parents: dacec33
Author: Rakesh Radhakrishnan 
Authored: Mon Jun 5 12:32:41 2017 +0530
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:00:48 2018 +0530

--
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 15 ++---
 .../namenode/TestStoragePolicySatisfier.java| 32 
 2 files changed, 42 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab0f4123/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
index 81d337f..bd4e5ed 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirSatisfyStoragePolicyOp.java
@@ -51,7 +51,6 @@ final class FSDirSatisfyStoragePolicyOp {
 
 assert fsd.getFSNamesystem().hasWriteLock();
 FSPermissionChecker pc = fsd.getPermissionChecker();
-List xAttrs = Lists.newArrayListWithCapacity(1);
 INodesInPath iip;
 fsd.writeLock();
 try {
@@ -62,8 +61,11 @@ final class FSDirSatisfyStoragePolicyOp {
 fsd.checkPathAccess(pc, iip, FsAction.WRITE);
   }
   XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-  fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  if (satisfyXAttr != null) {
+List xAttrs = Lists.newArrayListWithCapacity(1);
+xAttrs.add(satisfyXAttr);
+fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
+  }
 } finally {
   fsd.writeUnlock();
 }
@@ -79,16 +81,19 @@ final class FSDirSatisfyStoragePolicyOp {
 
 // TODO: think about optimization here, label the dir instead
 // of the sub-files of the dir.
-if (inode.isFile()) {
+if (inode.isFile() && inode.asFile().numBlocks() != 0) {
   candidateNodes.add(inode);
 } else if (inode.isDirectory()) {
   for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
+if (node.isFile() && node.asFile().numBlocks() != 0) {
   candidateNodes.add(node);
 }
   }
 }
 
+if (candidateNodes.isEmpty()) {
+  return null;
+}
 // If node has satisfy xattr, then stop adding it
 // to satisfy movement queue.
 if (inodeHasSatisfyXAttr(candidateNodes)) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/ab0f4123/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
index fa954b8..8e08a1e 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfier.java
@@ -907,6 +907,38 @@ public class TestStoragePolicySatisfier {
 }
   }
 
+  /**
+   * Test SPS with empty file.
+   * 1. Create one empty file.
+   * 2. Call satisfyStoragePolicy for empty file.
+   * 3. SPS should skip this file and xattr should not be added for empty file.
+   */
+  @Test(timeout = 30)
+  public void testSPSWhenFileLengthIsZero() throws Exception {
+MiniDFSCluster cluster = null;
+try {
+  cluster = new MiniDFSCluster.Builder(new Configuration()).numDataNodes(0)
+  .build();
+  cluster.waitActive();
+  DistributedFileSystem fs = cluster.getFileSystem();
+  Path filePath = new Path("/zeroSizeFile");
+  DFSTestUtil.createFile(fs, filePath, 0, (short) 1, 0);
+  FSEditLog editlog = cluster.getNameNode().getNamesystem().getEditLog();
+  long lastWrittenTxId = editlog.getLastWrittenTxId();
+  

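The essential change above is the numBlocks() != 0 guard plus the early null return, which together keep the xattr and the edit-log entry from ever being written for an empty file. A reduced sketch of that guard; INodeLike is a hypothetical stand-in for the INode API:

import java.util.ArrayList;
import java.util.List;

class SatisfyCandidates {
  interface INodeLike {
    boolean isFile();
    int numBlocks();
  }

  static List<INodeLike> collect(INodeLike inode, List<INodeLike> children) {
    List<INodeLike> candidates = new ArrayList<>();
    if (inode.isFile() && inode.numBlocks() != 0) {
      candidates.add(inode);                 // non-empty file: eligible
    } else {
      for (INodeLike child : children) {     // directory case: scan children
        if (child.isFile() && child.numBlocks() != 0) {
          candidates.add(child);
        }
      }
    }
    return candidates.isEmpty() ? null : candidates;  // null => skip the xattr
  }
}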
[27/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9062df87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
deleted file mode 100644
index 6991ad2..000
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStoragePolicySatisfierWithStripedFile.java
+++ /dev/null
@@ -1,580 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs.server.namenode;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.concurrent.TimeoutException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.StorageType;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
-import org.apache.hadoop.hdfs.NameNodeProxies;
-import org.apache.hadoop.hdfs.StripedFileTestUtil;
-import org.apache.hadoop.hdfs.client.HdfsAdmin;
-import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
-import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
-import org.apache.hadoop.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Supplier;
-
-/**
- * Tests that StoragePolicySatisfier daemon is able to check the striped blocks
- * to be moved and finding its expected target locations in order to satisfy the
- * storage policy.
- */
-public class TestStoragePolicySatisfierWithStripedFile {
-
-  private static final Logger LOG = LoggerFactory
-  .getLogger(TestStoragePolicySatisfierWithStripedFile.class);
-
-  private final int stripesPerBlock = 2;
-
-  private ErasureCodingPolicy ecPolicy;
-  private int dataBlocks;
-  private int parityBlocks;
-  private int cellSize;
-  private int defaultStripeBlockSize;
-
-  private ErasureCodingPolicy getEcPolicy() {
-return StripedFileTestUtil.getDefaultECPolicy();
-  }
-
-  /**
-   * Initialize erasure coding policy.
-   */
-  @Before
-  public void init(){
-ecPolicy = getEcPolicy();
-dataBlocks = ecPolicy.getNumDataUnits();
-parityBlocks = ecPolicy.getNumParityUnits();
-cellSize = ecPolicy.getCellSize();
-defaultStripeBlockSize = cellSize * stripesPerBlock;
-  }
-
-  /**
-   * Tests to verify that all the striped blocks(data + parity blocks) are
-   * moving to satisfy the storage policy.
-   */
-  @Test(timeout = 30)
-  public void testMoverWithFullStripe() throws Exception {
-// start 10 datanodes
-int numOfDatanodes = 10;
-int storagesPerDatanode = 2;
-long capacity = 20 * defaultStripeBlockSize;
-long[][] capacities = new long[numOfDatanodes][storagesPerDatanode];
-for (int i = 0; i < numOfDatanodes; i++) {
-  for (int j = 0; j < storagesPerDatanode; j++) {
-capacities[i][j] = capacity;
-  }
-}
-
-final Configuration conf = new HdfsConfiguration();
-conf.setBoolean(DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_ENABLED_KEY,
-true);
-initConfWithStripe(conf, defaultStripeBlockSize);
-final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
-.numDataNodes(numOfDatanodes)
-.storagesPerDatanode(storagesPerDatanode)
-   

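Although this test file is being deleted (it reappears under the sps package), its setup shows the MiniDFSCluster pattern every test in this series shares. A bare-bones skeleton using the calls that appear in these diffs, plus the standard shutdown():

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.junit.Test;

public class MiniClusterSkeletonTest {
  @Test(timeout = 300000)
  public void testSkeleton() throws Exception {
    MiniDFSCluster cluster = null;
    try {
      cluster = new MiniDFSCluster.Builder(new Configuration())
          .numDataNodes(3).build();
      cluster.waitActive();                  // block until DNs are registered
      DistributedFileSystem fs = cluster.getFileSystem();
      DFSTestUtil.createFile(fs, new Path("/f"), 1024, (short) 1, 0);
      // exercise the feature under test here
    } finally {
      if (cluster != null) {
        cluster.shutdown();
      }
    }
  }
}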
[03/50] [abbrv] hadoop git commit: HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the edits log. Contributed by Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-11695: [SPS]: Namenode failed to start while loading SPS xAttrs from the 
edits log. Contributed by Surendra Singh Lilhore.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a61e1f3f
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a61e1f3f
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a61e1f3f

Branch: refs/heads/HDFS-10285
Commit: a61e1f3f89479902c37c62bf5f5e62b609b14986
Parents: 95f64fb
Author: Uma Maheswara Rao G 
Authored: Mon May 22 21:39:43 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:00:33 2018 +0530

--
 .../hdfs/server/namenode/FSDirAttrOp.java   |  91 
 .../namenode/FSDirSatisfyStoragePolicyOp.java   | 145 +++
 .../hdfs/server/namenode/FSDirXAttrOp.java  |   2 +-
 .../hdfs/server/namenode/FSDirectory.java   |  16 --
 .../hdfs/server/namenode/FSNamesystem.java  |  24 ++-
 .../hadoop/hdfs/server/namenode/Namesystem.java |  10 ++
 .../server/namenode/StoragePolicySatisfier.java |   4 +-
 .../TestPersistentStoragePolicySatisfier.java   |  90 +++-
 .../namenode/TestStoragePolicySatisfier.java|   5 +-
 9 files changed, 268 insertions(+), 119 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a61e1f3f/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
index 0df58bf..1dbee96 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirAttrOp.java
@@ -27,7 +27,6 @@ import org.apache.hadoop.fs.XAttr;
 import org.apache.hadoop.fs.XAttrSetFlag;
 import org.apache.hadoop.fs.permission.FsAction;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.XAttrHelper;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
@@ -43,14 +42,12 @@ import com.google.common.collect.Lists;
 
 import java.io.FileNotFoundException;
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.EnumSet;
 import java.util.List;
 
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_QUOTA_BY_STORAGETYPE_ENABLED_KEY;
 import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
-import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
 
 public class FSDirAttrOp {
   static FileStatus setPermission(
@@ -193,29 +190,6 @@ public class FSDirAttrOp {
 return fsd.getAuditFileInfo(iip);
   }
 
-  static FileStatus satisfyStoragePolicy(FSDirectory fsd, BlockManager bm,
-  String src, boolean logRetryCache) throws IOException {
-
-FSPermissionChecker pc = fsd.getPermissionChecker();
-List xAttrs = Lists.newArrayListWithCapacity(1);
-INodesInPath iip;
-fsd.writeLock();
-try {
-
-  // check operation permission.
-  iip = fsd.resolvePath(pc, src, DirOp.WRITE);
-  if (fsd.isPermissionEnabled()) {
-fsd.checkPathAccess(pc, iip, FsAction.WRITE);
-  }
-  XAttr satisfyXAttr = unprotectedSatisfyStoragePolicy(iip, bm, fsd);
-  xAttrs.add(satisfyXAttr);
-} finally {
-  fsd.writeUnlock();
-}
-fsd.getEditLog().logSetXAttrs(src, xAttrs, logRetryCache);
-return fsd.getAuditFileInfo(iip);
-  }
-
   static BlockStoragePolicy[] getStoragePolicies(BlockManager bm)
   throws IOException {
 return bm.getStoragePolicies();
@@ -477,71 +451,6 @@ public class FSDirAttrOp {
 }
   }
 
-  static XAttr unprotectedSatisfyStoragePolicy(INodesInPath iip,
-  BlockManager bm, FSDirectory fsd) throws IOException {
-
-final INode inode = FSDirectory.resolveLastINode(iip);
-final int snapshotId = iip.getLatestSnapshotId();
-final List candidateNodes = new ArrayList<>();
-
-// TODO: think about optimization here, label the dir instead
-// of the sub-files of the dir.
-if (inode.isFile()) {
-  candidateNodes.add(inode);
-} else if (inode.isDirectory()) {
-  for (INode node : inode.asDirectory().getChildrenList(snapshotId)) {
-if (node.isFile()) {
-  candidateNodes.add(node);
-}
-  }
-}
-
-// If node has satisfy xattr, then stop adding it
-// to satisfy movement queue.
-if (inodeHasSatisfyXAttr(candidateNodes)) {
-  throw new 

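The method being removed above follows a common FSDirectory shape: mutate the namespace under the write lock, then append the edit-log record after releasing it. A generic sketch of that shape, with plain Java stand-ins for the FSDirectory lock and edit log (nothing here is an HDFS API):

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class LockThenLogSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final List<String> editLog = new ArrayList<>();

  void satisfy(String src) {
    String xattr;
    lock.writeLock().lock();
    try {
      // resolve the path, check permissions, attach the xattr...
      xattr = "satisfy-storage-policy on " + src;
    } finally {
      lock.writeLock().unlock();
    }
    // As in the removed method, the edit-log append happens only after the
    // namespace lock has been released.
    editLog.add("setXAttr " + xattr);
  }
}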
[41/50] [abbrv] hadoop git commit: HDFS-13097: [SPS]: Fix the branch review comments(Part1). Contributed by Surendra Singh.

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5b34b43/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
index 85a101f..47ea39f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeReconfigure.java
@@ -252,8 +252,8 @@ public class TestNameNodeReconfigure {
 // Since DFS_STORAGE_POLICY_ENABLED_KEY is disabled, SPS can't be enabled.
 assertEquals("SPS shouldn't start as "
 + DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY + " is disabled", false,
-nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 verifySPSEnabled(nameNode, DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.INTERNAL, false);
 
@@ -280,8 +280,8 @@ public class TestNameNodeReconfigure {
   fail("ReconfigurationException expected");
 } catch (ReconfigurationException e) {
   GenericTestUtils.assertExceptionContains(
-  "For enabling or disabling storage policy satisfier, we must "
-  + "pass either none/internal/external string value only",
+  "For enabling or disabling storage policy satisfier, must "
+  + "pass either internal/external/none string value only",
   e.getCause());
 }
 
@@ -301,8 +301,8 @@ public class TestNameNodeReconfigure {
 nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.EXTERNAL.toString());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-false, nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+false, nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
 StoragePolicySatisfierMode.EXTERNAL.toString(),
 nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -342,8 +342,8 @@ public class TestNameNodeReconfigure {
 nameNode.reconfigureProperty(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
 StoragePolicySatisfierMode.INTERNAL.toString());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
-true, nameNode.getNamesystem().getBlockManager()
-.isStoragePolicySatisfierRunning());
+true, nameNode.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 assertEquals(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY + " has wrong value",
 StoragePolicySatisfierMode.INTERNAL.toString(),
 nameNode.getConf().get(DFS_STORAGE_POLICY_SATISFIER_MODE_KEY,
@@ -353,7 +353,8 @@ public class TestNameNodeReconfigure {
   void verifySPSEnabled(final NameNode nameNode, String property,
   StoragePolicySatisfierMode expected, boolean isSatisfierRunning) {
 assertEquals(property + " has wrong value", isSatisfierRunning, nameNode
-.getNamesystem().getBlockManager().isStoragePolicySatisfierRunning());
+.getNamesystem().getBlockManager().getSPSManager()
+.isInternalSatisfierRunning());
 String actual = nameNode.getConf().get(property,
 DFS_STORAGE_POLICY_SATISFIER_MODE_DEFAULT);
 assertEquals(property + " has wrong value", expected,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f5b34b43/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
index b84214c..9f98777 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestPersistentStoragePolicySatisfier.java
@@ -389,7 +389,8 @@ public class TestPersistentStoragePolicySatisfier {
   fs.setStoragePolicy(testFile, ONE_SSD);
   fs.satisfyStoragePolicy(testFile);
 
-  cluster.getNamesystem().getBlockManager().disableSPS();
+  

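The repeated assertion pairs above are this commit's verification idiom: after each reconfigureProperty call, check both the live satisfier state and the persisted configuration value. Extracted into a generic helper; ServiceUnderTest is a hypothetical stand-in for the NameNode accessors:

import static org.junit.Assert.assertEquals;

class ReconfigureVerifier {
  // Hypothetical stand-in for the NameNode accessors used by the real test.
  interface ServiceUnderTest {
    boolean isSatisfierRunning();
    String getConf(String key, String defaultValue);
  }

  static void verifyMode(ServiceUnderTest svc, String property,
      String expectedMode, boolean expectRunning, String defaultMode) {
    assertEquals(property + " has wrong value",
        expectRunning, svc.isSatisfierRunning());
    assertEquals(property + " has wrong value",
        expectedMode, svc.getConf(property, defaultMode));
  }
}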
[26/50] [abbrv] hadoop git commit: HDFS-12955: [SPS]: Move SPS classes to a separate package. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9062df87/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
new file mode 100644
index 000..8dc52dc
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/sps/TestStoragePolicySatisfier.java
@@ -0,0 +1,1779 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.sps;
+
+import static 
org.apache.hadoop.hdfs.DFSConfigKeys.DFS_STORAGE_POLICY_ENABLED_KEY;
+import static 
org.apache.hadoop.hdfs.server.common.HdfsServerConstants.XATTR_SATISFY_STORAGE_POLICY;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.slf4j.LoggerFactory.getLogger;
+
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.TimeoutException;
+
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.ReconfigurationException;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StorageType;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.MiniDFSCluster.DataNodeProperties;
+import org.apache.hadoop.hdfs.NameNodeProxies;
+import org.apache.hadoop.hdfs.StripedFileTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import 
org.apache.hadoop.hdfs.protocol.HdfsConstants.StoragePolicySatisfyPathStatus;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.datanode.InternalDataNodeTestUtils;
+import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
+import org.apache.hadoop.hdfs.server.namenode.FSEditLog;
+import org.apache.hadoop.hdfs.server.namenode.FSTreeTraverser;
+import org.apache.hadoop.hdfs.server.namenode.INode;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.hadoop.test.GenericTestUtils.LogCapturer;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.slf4j.event.Level;
+
+import com.google.common.base.Supplier;
+
+/**
+ * Tests that the StoragePolicySatisfier daemon is able to identify blocks
+ * that need to be moved and to find suggested target locations for them.
+ */
+public class TestStoragePolicySatisfier {
+
+  {
+GenericTestUtils.setLogLevel(
+getLogger(FSTreeTraverser.class), Level.DEBUG);
+  }
+
+  private static final String ONE_SSD = "ONE_SSD";
+  private static final String COLD = "COLD";
+  private static final Logger LOG =
+  LoggerFactory.getLogger(TestStoragePolicySatisfier.class);
+  private final Configuration config = new HdfsConfiguration();
+  private StorageType[][] allDiskTypes =
+ 

[17/50] [abbrv] hadoop git commit: HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy storage policy of all the files under the given dir. Contributed by Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-12291: [SPS]: Provide a mechanism to recursively iterate and satisfy 
storage policy of all the files under the given dir. Contributed by Surendra 
Singh Lilhore.
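
As a rough illustration of what this change enables, a client can now set a
policy on a directory and hand the directory itself to the satisfier, which
walks the tree and queues every file beneath it. A minimal sketch, assuming
the HdfsAdmin#satisfyStoragePolicy API from this feature branch and a
hypothetical NameNode address:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class SatisfyDirectorySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    URI nnUri = URI.create("hdfs://localhost:8020"); // hypothetical address
    DistributedFileSystem dfs =
        (DistributedFileSystem) FileSystem.get(nnUri, conf);

    // Mark the whole tree COLD, then ask the SPS to act on the directory;
    // with this change the satisfier recursively queues each file under it.
    Path dir = new Path("/archive");
    dfs.setStoragePolicy(dir, "COLD");
    new HdfsAdmin(nnUri, conf).satisfyStoragePolicy(dir);
  }
}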


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50d23317
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50d23317
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50d23317

Branch: refs/heads/HDFS-10285
Commit: 50d23317a4fcdd52615756033fdba792170f9eeb
Parents: 60e9c15
Author: Uma Maheswara Rao G 
Authored: Sat Sep 30 06:31:52 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:02:23 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +
 .../java/org/apache/hadoop/hdfs/DFSUtil.java|  22 +-
 .../BlockStorageMovementAttemptedItems.java |   8 +-
 .../namenode/BlockStorageMovementNeeded.java| 277 +++---
 .../server/namenode/ReencryptionHandler.java|   1 +
 .../server/namenode/StoragePolicySatisfier.java |  43 ++-
 .../src/main/resources/hdfs-default.xml |  23 ++
 .../src/site/markdown/ArchivalStorage.md|   3 +-
 .../TestBlockStorageMovementAttemptedItems.java |   2 +-
 .../TestPersistentStoragePolicySatisfier.java   |   8 +-
 .../namenode/TestStoragePolicySatisfier.java| 377 ++-
 11 files changed, 689 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50d23317/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index cf5206a..c8fa40c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -612,6 +612,14 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   "dfs.storage.policy.satisfier.enabled";
   public static final boolean DFS_STORAGE_POLICY_SATISFIER_ENABLED_DEFAULT =
   false;
+  public static final String  DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY =
+  "dfs.storage.policy.satisfier.queue.limit";
+  public static final int  DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_DEFAULT =
+  1000;
+  public static final String DFS_SPS_WORK_MULTIPLIER_PER_ITERATION =
+  "dfs.storage.policy.satisfier.work.multiplier.per.iteration";
+  public static final int DFS_SPS_WORK_MULTIPLIER_PER_ITERATION_DEFAULT =
+  1;
   public static final String DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50d23317/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
index f5ceeaf..c26599c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
@@ -1457,7 +1457,27 @@ public class DFSUtil {
 "It should be a positive, non-zero integer value.");
 return blocksReplWorkMultiplier;
   }
-  
+
+  /**
+   * Get DFS_SPS_WORK_MULTIPLIER_PER_ITERATION from
+   * configuration.
+   *
+   * @param conf Configuration
+   * @return Value of DFS_SPS_WORK_MULTIPLIER_PER_ITERATION
+   */
+  public static int getSPSWorkMultiplier(Configuration conf) {
+int spsWorkMultiplier = conf.getInt(
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION,
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION_DEFAULT);
+Preconditions.checkArgument(
+(spsWorkMultiplier > 0),
+DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION +
+" = '" + spsWorkMultiplier + "' is invalid. " +
+"It should be a positive, non-zero integer value.");
+return spsWorkMultiplier;
+  }
+
   /**
* Get SPNEGO keytab Key from configuration
* 
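
To see how the new keys and helper above fit together, a short hypothetical
snippet (only the key and method names come from this diff; the scaffolding
is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class SpsTuningSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Bound the satisfier's pending-items queue (new queue.limit key).
    conf.setInt(
        DFSConfigKeys.DFS_STORAGE_POLICY_SATISFIER_QUEUE_LIMIT_KEY, 500);
    // Allow twice the default block-moving work per iteration.
    conf.setInt(DFSConfigKeys.DFS_SPS_WORK_MULTIPLIER_PER_ITERATION, 2);
    // The helper validates the value; non-positive settings trip the
    // Preconditions check shown in the diff.
    int multiplier = DFSUtil.getSPSWorkMultiplier(conf);
    System.out.println("SPS work multiplier = " + multiplier); // prints 2
  }
}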

http://git-wip-us.apache.org/repos/asf/hadoop/blob/50d23317/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BlockStorageMovementAttemptedItems.java
--
diff --git 

[09/50] [abbrv] hadoop git commit: HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks before removing the xattr. Contributed by Surendra Singh Lilhore.

2018-07-12 Thread rakeshr
HDFS-11965: [SPS]: Should give chance to satisfy the low redundant blocks 
before removing the xattr. Contributed by Surendra Singh Lilhore.
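
In essence: when every reported block already satisfies the policy but some
blocks are under-replicated, the trackID goes back on the retry queue
instead of having its satisfy-storage-policy xattr removed. A toy,
self-contained model of that decision (these are stand-in names, not the
patch's real classes):

import java.util.ArrayDeque;
import java.util.Queue;

public class LowRedundancyRetrySketch {
  // Stand-in for BlockManager#hasLowRedundancyBlocks: true if any block
  // has fewer live replicas than expected.
  static boolean hasLowRedundancyBlocks(int[] liveReplicas, int expected) {
    for (int live : liveReplicas) {
      if (live < expected) {
        return true;
      }
    }
    return false;
  }

  public static void main(String[] args) {
    Queue<Long> storageMovementNeeded = new ArrayDeque<>();
    long trackId = 42L;
    int[] liveReplicasPerBlock = {3, 1, 3}; // one under-replicated block

    if (hasLowRedundancyBlocks(liveReplicasPerBlock, 3)) {
      // Policy is satisfied but redundancy is low: retry later rather
      // than removing the xattr now.
      storageMovementNeeded.add(trackId);
    }
    System.out.println("Retry queue: " + storageMovementNeeded);
  }
}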


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/55f32ce1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/55f32ce1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/55f32ce1

Branch: refs/heads/HDFS-10285
Commit: 55f32ce108cb2630fb2cfff7088e105451f07d48
Parents: e64323e
Author: Uma Maheswara Rao G 
Authored: Mon Jul 10 18:00:58 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:01:20 2018 +0530

--
 .../server/blockmanagement/BlockManager.java|  15 +++
 .../server/namenode/StoragePolicySatisfier.java |  20 +++-
 .../namenode/TestStoragePolicySatisfier.java| 102 ++-
 ...stStoragePolicySatisfierWithStripedFile.java |  90 
 4 files changed, 224 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f32ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 1319a2c..ddf3f6c 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -4324,6 +4324,21 @@ public class BlockManager implements BlockStatsMXBean {
   }
 
   /**
+   * Checks whether the given file has any low-redundancy blocks.
+   */
+  public boolean hasLowRedundancyBlocks(BlockCollection bc) {
+boolean result = false;
+for (BlockInfo block : bc.getBlocks()) {
+  short expected = getExpectedRedundancyNum(block);
+  final NumberReplicas n = countNodes(block);
+  if (expected > n.liveReplicas()) {
+result = true;
+  }
+}
+return result;
+  }
+
+  /**
* Check sufficient redundancy of the blocks in the collection. If any block
* needs reconstruction, insert it into the reconstruction queue.
* Otherwise, if the block is more than the expected replication factor,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/55f32ce1/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
index 1b2afa3..97cbf1b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StoragePolicySatisfier.java
@@ -99,7 +99,10 @@ public class StoragePolicySatisfier implements Runnable {
 // Represents that, the analysis skipped due to some conditions.
 // Example conditions are if no blocks really exists in block collection or
 // if analysis is not required on ec files with unsuitable storage policies
-BLOCKS_TARGET_PAIRING_SKIPPED;
+BLOCKS_TARGET_PAIRING_SKIPPED,
+// Represents that all the reported blocks satisfy the policy, but some
+// of them have low redundancy.
+FEW_LOW_REDUNDANCY_BLOCKS
   }
 
   public StoragePolicySatisfier(final Namesystem namesystem,
@@ -247,6 +250,14 @@ public class StoragePolicySatisfier implements Runnable {
   case FEW_BLOCKS_TARGETS_PAIRED:
 this.storageMovementsMonitor.add(blockCollectionID, false);
 break;
+  case FEW_LOW_REDUNDANCY_BLOCKS:
+if (LOG.isDebugEnabled()) {
+  LOG.debug("Adding trackID " + blockCollectionID
+  + " back to retry queue as some of the blocks"
+  + " are low redundant.");
+}
+this.storageMovementNeeded.add(blockCollectionID);
+break;
   // Just clean Xattrs
   case BLOCKS_TARGET_PAIRING_SKIPPED:
   case BLOCKS_ALREADY_SATISFIED:
@@ -347,11 +358,16 @@ public class StoragePolicySatisfier implements Runnable {
 boolean computeStatus = computeBlockMovingInfos(blockMovingInfos,
 blockInfo, expectedStorageTypes, existing, storages);
 if (computeStatus
-&& status != 

[20/50] [abbrv] hadoop git commit: HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block storage movements. Contributed by Rakesh R.

2018-07-12 Thread rakeshr
HDFS-12570: [SPS]: Refactor Co-ordinator datanode logic to track the block 
storage movements. Contributed by Rakesh R.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/73a3c94c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/73a3c94c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/73a3c94c

Branch: refs/heads/HDFS-10285
Commit: 73a3c94cd3c838eb1be80fd5f59aa532771a242d
Parents: 50d2331
Author: Uma Maheswara Rao G 
Authored: Thu Oct 12 17:17:51 2017 -0700
Committer: Rakesh Radhakrishnan 
Committed: Thu Jul 12 17:02:31 2018 +0530

--
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   8 +-
 .../DatanodeProtocolClientSideTranslatorPB.java |  12 +-
 .../DatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../apache/hadoop/hdfs/protocolPB/PBHelper.java | 150 +++-
 .../blockmanagement/DatanodeDescriptor.java |  50 ++-
 .../server/blockmanagement/DatanodeManager.java | 104 --
 .../hdfs/server/datanode/BPOfferService.java|   3 +-
 .../hdfs/server/datanode/BPServiceActor.java|  33 +-
 .../datanode/BlockStorageMovementTracker.java   |  80 ++---
 .../datanode/StoragePolicySatisfyWorker.java| 214 
 .../BlockStorageMovementAttemptedItems.java | 299 
 .../BlockStorageMovementInfosBatch.java |  61 
 .../hdfs/server/namenode/FSNamesystem.java  |  11 +-
 .../hdfs/server/namenode/NameNodeRpcServer.java |   7 +-
 .../server/namenode/StoragePolicySatisfier.java | 343 ++-
 .../protocol/BlockStorageMovementCommand.java   |  99 ++
 .../BlocksStorageMoveAttemptFinished.java   |  48 +++
 .../protocol/BlocksStorageMovementResult.java   |  74 
 .../hdfs/server/protocol/DatanodeProtocol.java  |   5 +-
 .../src/main/proto/DatanodeProtocol.proto   |  30 +-
 .../src/main/resources/hdfs-default.xml |  21 +-
 .../src/site/markdown/ArchivalStorage.md|   6 +-
 .../TestNameNodePrunesMissingStorages.java  |   5 +-
 .../datanode/InternalDataNodeTestUtils.java |   4 +-
 .../server/datanode/TestBPOfferService.java |   4 +-
 .../hdfs/server/datanode/TestBlockRecovery.java |   4 +-
 .../server/datanode/TestDataNodeLifeline.java   |   6 +-
 .../TestDatanodeProtocolRetryPolicy.java|   4 +-
 .../server/datanode/TestFsDatasetCache.java |   4 +-
 .../TestStoragePolicySatisfyWorker.java |  52 ++-
 .../hdfs/server/datanode/TestStorageReport.java |   4 +-
 .../server/namenode/NNThroughputBenchmark.java  |   6 +-
 .../hdfs/server/namenode/NameNodeAdapter.java   |   4 +-
 .../TestBlockStorageMovementAttemptedItems.java | 145 
 .../hdfs/server/namenode/TestDeadDatanode.java  |   4 +-
 .../namenode/TestStoragePolicySatisfier.java| 115 ++-
 ...stStoragePolicySatisfierWithStripedFile.java |  20 +-
 37 files changed, 908 insertions(+), 1135 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/73a3c94c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
index c8fa40c..4a94220 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSConfigKeys.java
@@ -623,11 +623,15 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
   public static final String DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.recheck.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_RECHECK_TIMEOUT_MILLIS_DEFAULT =
-  5 * 60 * 1000;
+  1 * 60 * 1000;
   public static final String DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_KEY =
   "dfs.storage.policy.satisfier.self.retry.timeout.millis";
   public static final int DFS_STORAGE_POLICY_SATISFIER_SELF_RETRY_TIMEOUT_MILLIS_DEFAULT =
-  20 * 60 * 1000;
+  5 * 60 * 1000;
+  public static final String DFS_STORAGE_POLICY_SATISFIER_SHARE_EQUAL_REPLICA_MAX_STREAMS_KEY =
+  "dfs.storage.policy.satisfier.low.max-streams.preference";
+  public static final boolean DFS_STORAGE_POLICY_SATISFIER_SHARE_EQUAL_REPLICA_MAX_STREAMS_DEFAULT =
+  false;

   public static final String  DFS_DATANODE_ADDRESS_KEY = "dfs.datanode.address";
   public static final int DFS_DATANODE_DEFAULT_PORT = 9866;
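
For operators, the net effect of this hunk is a faster default recheck
(1 minute instead of 5), a shorter self-retry window (5 minutes instead
of 20), and a new boolean knob. A hypothetical tuning snippet using the
key strings from the diff (the values are arbitrary examples):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;

public class SpsTimeoutTuningSketch {
  public static void main(String[] args) {
    Configuration conf = new HdfsConfiguration();
    // Re-check pending block movements every 2 minutes.
    conf.setInt(
        "dfs.storage.policy.satisfier.recheck.timeout.millis",
        2 * 60 * 1000);
    // Give the satisfier a 10-minute self-retry window.
    conf.setInt(
        "dfs.storage.policy.satisfier.self.retry.timeout.millis",
        10 * 60 * 1000);
    // New boolean knob added here; its exact semantics are described in
    // the hdfs-default.xml entry this commit also edits.
    conf.setBoolean(
        "dfs.storage.policy.satisfier.low.max-streams.preference", true);
    System.out.println(
        conf.get("dfs.storage.policy.satisfier.recheck.timeout.millis"));
  }
}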

http://git-wip-us.apache.org/repos/asf/hadoop/blob/73a3c94c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java

hadoop git commit: YARN-8457. Compilation is broken with -Pyarn-ui.

2018-07-12 Thread stevel
Repository: hadoop
Updated Branches:
  refs/heads/HADOOP-15407 538fcf8bb -> fb17346b0


YARN-8457. Compilation is broken with -Pyarn-ui.

(cherry picked from commit 4ffe68a6f70ce01a5654da8991b4cdb35ae0bf1f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb17346b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb17346b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb17346b

Branch: refs/heads/HADOOP-15407
Commit: fb17346b0eb9e7161c8ab4b8f566f2d138fcaa41
Parents: 538fcf8
Author: Steve Loughran 
Authored: Thu Jul 12 14:00:40 2018 +0100
Committer: Steve Loughran 
Committed: Thu Jul 12 14:00:40 2018 +0100

--
 .../hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb17346b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
index 959e169..daf4462 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-ui/src/main/webapp/.bowerrc
@@ -1,4 +1,5 @@
 {
   "directory": "bower_components",
-  "analytics": false
+  "analytics": false,
+  "registry": "https://registry.bower.io;
 }
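
(The added "registry" entry points bower at registry.bower.io; the old
default bower registry was being retired around this time, which is
presumably why dependency resolution, and with it the -Pyarn-ui build,
had started failing.)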





[2/2] hadoop git commit: HADOOP-15316. GenericTestUtils can exceed maxSleepTime. Contributed by Adam Antal.

2018-07-12 Thread mackrorysd
HADOOP-15316. GenericTestUtils can exceed maxSleepTime. Contributed by Adam 
Antal.
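
The bug is a range error: r.nextInt(maxSleepTime) is uniform over
[0, maxSleepTime), so adding minSleepTime shifts the result into
[minSleepTime, maxSleepTime + minSleepTime), which can exceed the intended
cap. The fixed expression draws from [minSleepTime, maxSleepTime). A minimal
sketch of the two ranges (the values are arbitrary):

import java.util.Random;

public class SleepRangeSketch {
  public static void main(String[] args) {
    Random r = new Random();
    int minSleepTime = 100;
    int maxSleepTime = 500;

    // Old expression: uniform over [100, 599], may exceed maxSleepTime.
    int oldSleep = r.nextInt(maxSleepTime) + minSleepTime;

    // Fixed expression: uniform over [100, 499], always < maxSleepTime.
    int newSleep = r.nextInt(maxSleepTime - minSleepTime) + minSleepTime;

    System.out.println("old=" + oldSleep + " new=" + newSleep);
  }
}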


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4f3f9391
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4f3f9391
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4f3f9391

Branch: refs/heads/trunk
Commit: 4f3f9391b035d7f7e285c332770c6c1ede9a5a85
Parents: b37074b
Author: Sean Mackrory 
Authored: Thu Jul 12 16:45:07 2018 +0200
Committer: Sean Mackrory 
Committed: Thu Jul 12 17:24:01 2018 +0200

--
 .../src/test/java/org/apache/hadoop/test/GenericTestUtils.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4f3f9391/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
index 3e9da1b..0112894 100644
--- 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/test/GenericTestUtils.java
@@ -661,7 +661,7 @@ public abstract class GenericTestUtils {
 public Object answer(InvocationOnMock invocation) throws Throwable {
   boolean interrupted = false;
   try {
-Thread.sleep(r.nextInt(maxSleepTime) + minSleepTime);
+Thread.sleep(r.nextInt(maxSleepTime - minSleepTime) + minSleepTime);
   } catch (InterruptedException ie) {
 interrupted = true;
   }





[1/2] hadoop git commit: HADOOP-15349. S3Guard DDB retryBackoff to be more informative on limits exceeded. Contributed by Gabor Bota.

2018-07-12 Thread mackrorysd
Repository: hadoop
Updated Branches:
  refs/heads/trunk b37074be5 -> a08812a1b


HADOOP-15349. S3Guard DDB retryBackoff to be more informative on limits 
exceeded. Contributed by Gabor Bota.
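
For context, the loop being patched follows Hadoop's RetryPolicy contract:
ask shouldRetry, sleep for delayMillis while the decision is RETRY, and give
up once it is FAIL. A self-contained sketch of that shape (the policy
settings and simulated exception are illustrative, not the store's actual
configuration):

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class BackoffSketch {
  public static void main(String[] args) throws Exception {
    RetryPolicy policy =
        RetryPolicies.exponentialBackoffRetry(5, 100, TimeUnit.MILLISECONDS);
    Exception throttled = new IOException("simulated DynamoDB throttling");

    for (int retryCount = 0; ; retryCount++) {
      RetryPolicy.RetryAction action =
          policy.shouldRetry(throttled, retryCount, 0, true);
      if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) {
        // This is the terminal message the patch makes more informative.
        System.out.println(String.format(
            "Max retries exceeded (%d) for DynamoDB. This may be"
                + " because the write threshold of DynamoDB is set too low.",
            retryCount));
        return;
      }
      System.out.println("Sleeping " + action.delayMillis
          + " msec before retry " + retryCount);
      Thread.sleep(action.delayMillis);
    }
  }
}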


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a08812a1
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a08812a1
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a08812a1

Branch: refs/heads/trunk
Commit: a08812a1b10df059b26f6a216e6339490298ba28
Parents: 4f3f939
Author: Sean Mackrory 
Authored: Thu Jul 12 16:46:02 2018 +0200
Committer: Sean Mackrory 
Committed: Thu Jul 12 17:24:01 2018 +0200

--
 .../org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java   | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a08812a1/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
--
diff --git 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
index 116827d..43849b1 100644
--- 
a/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
+++ 
b/hadoop-tools/hadoop-aws/src/main/java/org/apache/hadoop/fs/s3a/s3guard/DynamoDBMetadataStore.java
@@ -655,7 +655,8 @@ public class DynamoDBMetadataStore implements MetadataStore {
   retryCount, 0, true);
   if (action.action == RetryPolicy.RetryAction.RetryDecision.FAIL) {
 throw new IOException(
-String.format("Max retries exceeded (%d) for DynamoDB",
+String.format("Max retries exceeded (%d) for DynamoDB. This may be"
++ " because write threshold of DynamoDB is set too low.",
 retryCount));
   } else {
 LOG.debug("Sleeping {} msec before next retry", action.delayMillis);

