[12/49] hadoop git commit: HDFS-9449. DiskBalancer: Add connectors. Contributed by Anu Engineer

2016-06-23 Thread arp
HDFS-9449. DiskBalancer: Add connectors. Contributed by Anu Engineer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/30c6ebd6
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/30c6ebd6
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/30c6ebd6

Branch: refs/heads/HDFS-1312
Commit: 30c6ebd69919a477a582e599fb253ffe5c2982e1
Parents: 91a5c48
Author: Tsz-Wo Nicholas Sze 
Authored: Tue Dec 1 14:43:06 2015 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt   |   2 +
 .../connectors/ConnectorFactory.java|  54 +++
 .../connectors/DBNameNodeConnector.java | 162 +++
 .../connectors/JsonNodeConnector.java   |  77 +
 .../diskbalancer/connectors/package-info.java   |  10 +-
 .../server/diskbalancer/TestConnectors.java |  82 ++
 6 files changed, 386 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/30c6ebd6/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
index 5a71032..cad8e49 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
@@ -3,3 +3,5 @@ HDFS-1312 Change Log
   NEW FEATURES
 
 HDFS-9420. Add DataModels for DiskBalancer. (Anu Engineer via szetszwo)
+
+HDFS-9449. DiskBalancer: Add connectors. (Anu Engineer via szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30c6ebd6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
new file mode 100644
index 000..040923a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URISyntaxException;
+
+/**
+ * Connector factory creates appropriate connector based on the URL.
+ */
+public final class ConnectorFactory {
+  static final Log LOG = LogFactory.getLog(ConnectorFactory.class);
+
+  /**
+   * Constructs an appropriate connector based on the URL.
+   * @param clusterURI - URL
+   * @return ClusterConnector
+   */
+  public static ClusterConnector getCluster(URI clusterURI, Configuration
+      conf) throws IOException, URISyntaxException {
+    LOG.info("Cluster URI : " + clusterURI);
+    LOG.info("scheme : " + clusterURI.getScheme());
+    if (clusterURI.getScheme().startsWith("file")) {
+      LOG.info("Creating a JsonNodeConnector");
+      return new JsonNodeConnector(clusterURI.toURL());
+    } else {
+      LOG.info("Creating NameNode connector");
+      return new DBNameNodeConnector(clusterURI, conf);
+    }
+  }
+
+  private ConnectorFactory() {
+    // never constructed
+  }
+}
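A minimal usage sketch of the factory above (not part of the commit; both URIs are placeholders): file:// schemes yield the JSON-snapshot connector, anything else falls through to the live NameNode connector.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;

import java.net.URI;

public class ConnectorDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // file:// resolves to JsonNodeConnector, which reads a saved cluster
    // snapshot instead of talking to a live cluster.
    ClusterConnector json = ConnectorFactory.getCluster(
        new URI("file:///tmp/cluster.json"), conf);
    // Any other scheme resolves to the NameNode-backed connector.
    ClusterConnector live = ConnectorFactory.getCluster(
        new URI("hdfs://namenode.example.com:8020"), conf);
    System.out.println(json.getConnectorInfo());
    System.out.println(live.getConnectorInfo());
  }
}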

http://git-wip-us.apache.org/repos/asf/hadoop/blob/30c6ebd6/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java 

[15/49] hadoop git commit: HDFS-9595. DiskBalancer: Add cancelPlan RPC. (Contributed by Anu Engineer)

2016-06-23 Thread arp
HDFS-9595. DiskBalancer: Add cancelPlan RPC. (Contributed by Anu Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0501d430
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0501d430
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0501d430

Branch: refs/heads/HDFS-1312
Commit: 0501d430e2f6111ad8b65dc36f4a98d94cb9589b
Parents: 7100c0d
Author: Arpit Agarwal 
Authored: Fri Jan 15 16:08:49 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   |  7 +++
 .../ClientDatanodeProtocolTranslatorPB.java | 19 +++
 .../src/main/proto/ClientDatanodeProtocol.proto | 19 +++
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt   |  3 ++
 ...tDatanodeProtocolServerSideTranslatorPB.java | 22 
 .../hadoop/hdfs/server/datanode/DataNode.java   |  7 +++
 .../diskbalancer/planner/GreedyPlanner.java |  4 ++
 .../diskbalancer/TestDiskBalancerRPC.java   | 56 
 8 files changed, 127 insertions(+), 10 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0501d430/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 6e9cef0..125a3c1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -170,4 +170,11 @@ public interface ClientDatanodeProtocol {
   void submitDiskBalancerPlan(String planID, long planVersion, long bandwidth,
   String plan) throws IOException;
 
+  /**
+   * Cancel an executing plan.
+   *
+   * @param planID - A SHA512 hash of the plan string.
+   */
+  void cancelDiskBalancePlan(String planID) throws IOException;
+
 }
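For context, a hedged client-side sketch (not in the patch) of how a caller derives the planID this method expects; `dnProxy` and `planJson` are assumed inputs:

import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;

import java.io.IOException;

final class CancelSketch {
  static void cancel(ClientDatanodeProtocol dnProxy, String planJson)
      throws IOException {
    // The protocol defines planID as a SHA512 hash of the plan string,
    // so the caller re-derives it from the submitted plan text.
    String planID = DigestUtils.sha512Hex(planJson);
    dnProxy.cancelDiskBalancePlan(planID);
  }
}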

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0501d430/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index da8d962..e037fcf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.Shutdo
 import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -358,4 +359,22 @@ public class ClientDatanodeProtocolTranslatorPB implements
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  /**
+   * Cancels an executing disk balancer plan.
+   * @param planID - A SHA512 hash of the plan string.
+   *
+   * @throws IOException on error
+   */
+  @Override
+  public void cancelDiskBalancePlan(String planID)
+      throws IOException {
+    try {
+      CancelPlanRequestProto request = CancelPlanRequestProto.newBuilder()
+          .setPlanID(planID).build();
+      rpcProxy.cancelDiskBalancerPlan(NULL_CONTROLLER, request);
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+  }
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0501d430/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto 
 

[14/49] hadoop git commit: HDFS-9647. DiskBalancer: Add getRuntimeSettings. (Contributed by Anu Engineer)

2016-06-23 Thread arp
HDFS-9647. DiskBalancer: Add getRuntimeSettings. (Contributed by Anu Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66f0bb64
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66f0bb64
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66f0bb64

Branch: refs/heads/HDFS-1312
Commit: 66f0bb646d040a80bde75b5b3f7eacafd0034fe4
Parents: 96fe685
Author: Arpit Agarwal 
Authored: Fri Jan 29 11:05:53 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   | 10 ++
 .../ClientDatanodeProtocolTranslatorPB.java | 17 +-
 .../src/main/proto/ClientDatanodeProtocol.proto | 19 +++
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt   |  3 ++
 ...tDatanodeProtocolServerSideTranslatorPB.java | 24 --
 .../hadoop/hdfs/server/datanode/DataNode.java   | 14 
 .../diskbalancer/DiskBalancerConstants.java | 35 
 .../diskbalancer/TestDiskBalancerRPC.java   | 16 ++---
 8 files changed, 130 insertions(+), 8 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66f0bb64/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 705c98f..dede89e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -183,4 +183,14 @@ public interface ClientDatanodeProtocol {
    * Gets the status of an executing diskbalancer Plan.
    */
   WorkStatus queryDiskBalancerPlan() throws IOException;
+
+  /**
+   * Gets a run-time configuration value from running diskbalancer instance.
+   * For example : Disk Balancer bandwidth of a running instance.
+   *
+   * @param key runtime configuration key
+   * @return value of the key as a string.
+   * @throws IOException - Throws if there is no such key
+   */
+  String getDiskBalancerSetting(String key) throws IOException;
 }
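Illustrative only (not in the patch): querying one runtime value over this method. The key string is an assumption about what the new DiskBalancerConstants class defines; treat both it and the proxy as placeholders.

import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;

import java.io.IOException;

final class SettingSketch {
  static long currentBandwidth(ClientDatanodeProtocol dnProxy)
      throws IOException {
    // Throws IOException when the datanode does not recognize the key.
    String value = dnProxy.getDiskBalancerSetting("DiskBalancerBandwidth");
    return Long.parseLong(value);
  }
}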

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66f0bb64/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 59f2fd2..e7e0d94 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -56,6 +56,8 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.Submit
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
 import org.apache.hadoop.ipc.ProtobufHelper;
@@ -365,8 +367,8 @@ public class ClientDatanodeProtocolTranslatorPB implements
 
   /**
    * Cancels an executing disk balancer plan.
-   * @param planID - A SHA512 hash of the plan string.
    *
+   * @param planID - A SHA512 hash of the plan string.
    * @throws IOException on error
    */
   @Override
@@ -399,4 +401,17 @@ public class ClientDatanodeProtocolTranslatorPB implements
       throw ProtobufHelper.getRemoteException(e);
     }
   }
+
+  @Override
+  public String getDiskBalancerSetting(String key) throws IOException {
+    try {
+      DiskBalancerSettingRequestProto request =
+          DiskBalancerSettingRequestProto.newBuilder().setKey(key).build();
+  

[11/49] hadoop git commit: HDFS-9611. DiskBalancer : Replace htrace json imports with jackson. (Contributed by Anu Engineer)

2016-06-23 Thread arp
HDFS-9611. DiskBalancer : Replace htrace json imports with jackson. 
(Contributed by Anu Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e325c6ad
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e325c6ad
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e325c6ad

Branch: refs/heads/HDFS-1312
Commit: e325c6ade9dab41e0fbbf5222bb72236d4d5a77f
Parents: 599eca0
Author: Arpit Agarwal 
Authored: Tue Jan 5 09:10:03 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt| 4 
 .../server/diskbalancer/datamodel/DiskBalancerVolumeSet.java | 4 ++--
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e325c6ad/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
index 952813b..8220f88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
@@ -7,3 +7,7 @@ HDFS-1312 Change Log
 HDFS-9449. DiskBalancer: Add connectors. (Anu Engineer via szetszwo)
 
 HDFS-9526. Fix jackson annotation imports. (Xiaobing Zhou via szetszwo)
+
+HDFS-9611. DiskBalancer : Replace htrace json imports with jackson.
+(Anu Engineer via Arpit Agarwal)
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e325c6ad/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java
index 15c21ac..49c8558 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolumeSet.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
 import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import org.apache.htrace.fasterxml.jackson.annotation.JsonIgnore;
-import org.apache.htrace.fasterxml.jackson.annotation.JsonProperty;
+import org.codehaus.jackson.annotate.JsonIgnore;
+import org.codehaus.jackson.annotate.JsonProperty;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 
 import java.io.Serializable;
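A hedged sketch of the annotation style the patch standardizes on, assuming Jackson 1.x on the classpath (class and values invented for illustration): the org.codehaus.jackson annotations drive what the 1.x ObjectMapper serializes.

import org.codehaus.jackson.annotate.JsonIgnore;
import org.codehaus.jackson.annotate.JsonIgnoreProperties;
import org.codehaus.jackson.annotate.JsonProperty;
import org.codehaus.jackson.map.ObjectMapper;

@JsonIgnoreProperties(ignoreUnknown = true)
class VolumeInfo {
  private String path = "/data/disk1";
  private double usedRatio = 0.42;

  @JsonProperty
  public String getPath() { return path; }

  @JsonIgnore              // derived value, excluded from the JSON output
  public double getUsedRatio() { return usedRatio; }
}

class JacksonDemo {
  public static void main(String[] args) throws Exception {
    // Prints {"path":"/data/disk1"}; usedRatio is skipped by @JsonIgnore.
    System.out.println(new ObjectMapper().writeValueAsString(new VolumeInfo()));
  }
}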


[06/49] hadoop git commit: HDFS-9420. Add DataModels for DiskBalancer. Contributed by Anu Engineer

2016-06-23 Thread arp
HDFS-9420. Add DataModels for DiskBalancer. Contributed by Anu Engineer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/91a5c481
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/91a5c481
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/91a5c481

Branch: refs/heads/HDFS-1312
Commit: 91a5c4814381a4d4c3ce9b29a1f85299e03be835
Parents: 0b9edf6
Author: Tsz-Wo Nicholas Sze 
Authored: Mon Nov 23 19:07:42 2015 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt   |   5 +
 .../connectors/ClusterConnector.java|  44 +++
 .../diskbalancer/connectors/package-info.java   |  29 ++
 .../datamodel/DiskBalancerCluster.java  | 249 ++
 .../datamodel/DiskBalancerDataNode.java | 269 +++
 .../datamodel/DiskBalancerVolume.java   | 330 +++
 .../datamodel/DiskBalancerVolumeSet.java| 325 ++
 .../diskbalancer/datamodel/package-info.java|  31 ++
 .../hdfs/server/diskbalancer/package-info.java  |  36 ++
 .../diskbalancer/DiskBalancerTestUtil.java  | 227 +
 .../server/diskbalancer/TestDataModels.java | 224 +
 .../diskbalancer/connectors/NullConnector.java  |  59 
 12 files changed, 1828 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a5c481/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
new file mode 100644
index 000..5a71032
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
@@ -0,0 +1,5 @@
+HDFS-1312 Change Log
+
+  NEW FEATURES
+
+HDFS-9420. Add DataModels for DiskBalancer. (Anu Engineer via szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a5c481/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ClusterConnector.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ClusterConnector.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ClusterConnector.java
new file mode 100644
index 000..3dbfec2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ClusterConnector.java
@@ -0,0 +1,44 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+
+package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
+
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
+
+import java.util.List;
+
+/**
+ * ClusterConnector interface hides all specifics about how we communicate to
+ * the HDFS cluster. This interface returns data in classes that diskbalancer
+ * understands.
+ */
+public interface ClusterConnector {
+
+  /**
+   * getNodes function returns a list of DiskBalancerDataNodes.
+   *
+   * @return Array of DiskBalancerDataNodes
+   */
+  List<DiskBalancerDataNode> getNodes() throws Exception;
+
+  /**
+   * Returns info about the connector.
+   *
+   * @return String.
+   */
+  String getConnectorInfo();
+}
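In the spirit of the NullConnector the tests add (the class below is a guess at the minimal shape, not the test class itself), the smallest possible implementation looks like:

package org.apache.hadoop.hdfs.server.diskbalancer.connectors;

import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;

import java.util.LinkedList;
import java.util.List;

class EmptyConnector implements ClusterConnector {
  private final List<DiskBalancerDataNode> nodes = new LinkedList<>();

  @Override
  public List<DiskBalancerDataNode> getNodes() throws Exception {
    return nodes;                     // no cluster behind this connector
  }

  @Override
  public String getConnectorInfo() {
    return "Empty connector : returns no nodes.";
  }
}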

http://git-wip-us.apache.org/repos/asf/hadoop/blob/91a5c481/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java
new file mode 100644
index 000..8164804
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/package-info.java

[27/49] hadoop git commit: HDFS-10501. DiskBalancer: Use the default datanode port if port is not provided. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10501. DiskBalancer: Use the default datanode port if port is not 
provided. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/121142cf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/121142cf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/121142cf

Branch: refs/heads/HDFS-1312
Commit: 121142cf952a4f9af1eb2488fe1714b6b8e685b6
Parents: 78a1032
Author: Anu Engineer 
Authored: Thu Jun 9 19:47:01 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../server/diskbalancer/command/QueryCommand.java | 18 --
 1 file changed, 16 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/121142cf/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index 36448b8..ea7dbcc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -22,10 +22,12 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command;
 import com.google.common.base.Preconditions;
 import org.apache.commons.cli.CommandLine;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
+import org.apache.hadoop.net.NetUtils;
 
 /**
  * Gets the current status of disk balancer command.
@@ -55,10 +57,22 @@ public class QueryCommand extends Command {
     verifyCommandOptions(DiskBalancer.QUERY, cmd);
     String nodeName = cmd.getOptionValue(DiskBalancer.QUERY);
     Preconditions.checkNotNull(nodeName);
-    ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeName);
+    nodeName = nodeName.trim();
+    String nodeAddress = nodeName;
+
+    // if the string is not name:port format use the default port.
+    if(!nodeName.matches("^.*:\\d$")) {
+      int defaultIPC = NetUtils.createSocketAddr(
+          getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
+              DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
+      nodeAddress = nodeName + ":" + defaultIPC;
+      LOG.debug("Using default data node port :  {}", nodeAddress);
+    }
+
+    ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeAddress);
     try {
       DiskBalancerWorkStatus workStatus = dataNode.queryDiskBalancerPlan();
-      System.out.printf("Plan ID: %s Result: %s%n", workStatus.getPlanID(),
+      System.out.printf("Plan ID: %s %nResult: %s%n", workStatus.getPlanID(),
           workStatus.getResult().toString());
 
       if(cmd.hasOption(DiskBalancer.VERBOSE)) {
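The defaulting logic restated as a standalone helper (hypothetical, not in the patch). Note the patch's regex as shown, ^.*:\\d$, only matches a single trailing digit; this sketch uses \\d+ so multi-digit ports such as :9867 pass through unchanged.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.net.NetUtils;

final class NodeAddress {
  static String withDefaultPort(String nodeName, Configuration conf) {
    nodeName = nodeName.trim();
    if (nodeName.matches("^.*:\\d+$")) {
      return nodeName;               // explicit host:port, pass through
    }
    // Bare hostname: append the datanode IPC port from configuration.
    int defaultIPC = NetUtils.createSocketAddr(
        conf.getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
            DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();
    return nodeName + ":" + defaultIPC;
  }
}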


[31/49] hadoop git commit: HDFS-10517. DiskBalancer: Support help command. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10517. DiskBalancer: Support help command. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/af11ab34
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/af11ab34
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/af11ab34

Branch: refs/heads/HDFS-1312
Commit: af11ab34d0cff3883885a25aa918be4f98566142
Parents: b502102
Author: Anu Engineer 
Authored: Mon Jun 13 14:02:04 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../diskbalancer/command/CancelCommand.java |  20 +-
 .../server/diskbalancer/command/Command.java|   4 +-
 .../diskbalancer/command/ExecuteCommand.java|  17 +-
 .../diskbalancer/command/HelpCommand.java   | 108 +
 .../diskbalancer/command/PlanCommand.java   |  28 ++-
 .../diskbalancer/command/QueryCommand.java  |  15 +-
 .../diskbalancer/command/ReportCommand.java |  18 +-
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  | 233 ++-
 8 files changed, 360 insertions(+), 83 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/af11ab34/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
index 3834d9b..740292d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command;
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.io.IOUtils;
 import org.apache.hadoop.conf.Configuration;
@@ -126,12 +127,21 @@ public class CancelCommand extends Command {
 
   /**
    * Gets extended help for this command.
-   *
-   * @return Help Message
    */
   @Override
-  protected String getHelp() {
-    return "Cancels a running command. e.g -cancel <planFile> or -cancel " +
-        "<planID> -node <datanode>";
+  public void printHelp() {
+    String header = "Cancel command cancels a running disk balancer operation" +
+        ".\n\n";
+
+    String footer = "\nCancel command can be run via pointing to a plan file," +
+        " or by reading the plan ID using the query command and then using " +
+        "planID and hostname. Examples of how to run this command are \n" +
+        "hdfs diskbalancer -cancel <planFile> \n" +
+        "hdfs diskbalancer -cancel <planID> -node <hostname>";
+
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.printHelp("hdfs diskbalancer -cancel <planFile> | -cancel " +
+        "<planID> -node <hostname>",
+        header, DiskBalancer.getCancelOptions(), footer);
   }
 }
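A self-contained commons-cli sketch of the pattern printHelp() adopts (the option name and strings are invented for illustration):

import org.apache.commons.cli.HelpFormatter;
import org.apache.commons.cli.Option;
import org.apache.commons.cli.Options;

class HelpDemo {
  public static void main(String[] args) {
    Options opts = new Options();
    opts.addOption(new Option("cancel", true, "Plan file to cancel"));
    new HelpFormatter().printHelp(
        "hdfs diskbalancer -cancel <planFile>",          // usage line
        "Cancels a running disk balancer plan.\n\n",     // header
        opts,
        "\nRead the plan ID with -query first if you lost the file.");
  }
}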

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af11ab34/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index bbf91ca..d2813e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -100,10 +100,8 @@ public abstract class Command extends Configured {
 
   /**
    * Gets extended help for this command.
-   *
-   * @return Help Message
    */
-  protected abstract String getHelp();
+  public abstract void printHelp();
 
   /**
    * verifies user provided URL.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/af11ab34/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
index 85f2a86..5fd1f0a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java

[45/49] hadoop git commit: HDFS-10557. Fix handling of the -fs Generic option. (Arpit Agarwal)

2016-06-23 Thread arp
HDFS-10557. Fix handling of the -fs Generic option. (Arpit Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/66fa34c8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/66fa34c8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/66fa34c8

Branch: refs/heads/HDFS-1312
Commit: 66fa34c839c89733839cb67878fdfdc4b1f65ab8
Parents: 7b23ad1
Author: Arpit Agarwal 
Authored: Wed Jun 22 08:23:45 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:27:01 2016 -0700

--
 .../server/diskbalancer/command/Command.java| 27 +++-
 .../diskbalancer/command/PlanCommand.java   | 11 
 .../diskbalancer/command/ReportCommand.java |  6 ++---
 .../diskbalancer/planner/GreedyPlanner.java |  8 +++---
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  | 18 -
 .../src/site/markdown/HDFSDiskbalancer.md   |  6 ++---
 .../command/TestDiskBalancerCommand.java| 23 +++--
 7 files changed, 27 insertions(+), 72 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/66fa34c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index 19f9945..3ea1b03 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -45,9 +45,7 @@ import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.MalformedURLException;
 import java.net.URI;
-import java.net.URISyntaxException;
 import java.net.URL;
 import java.nio.charset.Charset;
 import java.nio.file.Files;
@@ -93,8 +91,7 @@ public abstract class Command extends Configured {
    * Executes the Client Calls.
    *
    * @param cmd - CommandLine
-   * @throws IOException
-   * @throws URISyntaxException
+   * @throws Exception
    */
   public abstract void execute(CommandLine cmd) throws Exception;
 
@@ -104,22 +101,6 @@ public abstract class Command extends Configured {
   public abstract void printHelp();
 
   /**
-   * verifies user provided URL.
-   *
-   * @param uri - UrlString
-   * @return URL
-   * @throws URISyntaxException, MalformedURLException
-   */
-  protected URI verifyURI(String uri)
-      throws URISyntaxException, MalformedURLException {
-    if ((uri == null) || uri.isEmpty()) {
-      throw new MalformedURLException(
-          "A valid URI is needed to execute this command.");
-    }
-    return new URI(uri);
-  }
-
-  /**
    * Process the URI and return the cluster with nodes setup. This is used in
    * all commands.
    *
@@ -130,11 +111,8 @@ public abstract class Command extends Configured {
   protected DiskBalancerCluster readClusterInfo(CommandLine cmd) throws
       Exception {
     Preconditions.checkNotNull(cmd);
-    Preconditions
-        .checkState(cmd.getOptionValue(DiskBalancer.NAMENODEURI) != null,
-            "Required argument missing : uri");
 
-    setClusterURI(verifyURI(cmd.getOptionValue(DiskBalancer.NAMENODEURI)));
+    setClusterURI(FileSystem.getDefaultUri(getConf()));
     LOG.debug("using name node URI : {}", this.getClusterURI());
     ClusterConnector connector = ConnectorFactory.getCluster(this.clusterURI,
         getConf());
@@ -346,6 +324,7 @@ public abstract class Command extends Configured {
    *
    * @param fileName - fileName to open.
    * @return OutputStream.
+   * @throws IOException
    */
   protected FSDataOutputStream create(String fileName) throws IOException {
     Preconditions.checkNotNull(fileName);
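Why dropping -uri in favour of the generic -fs option works: a hedged sketch (class invented) of the standard Tool/ToolRunner wiring, in which GenericOptionsParser consumes -fs into the Configuration that FileSystem.getDefaultUri() then reads.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;

class FsOptionDemo extends Configured implements Tool {
  @Override
  public int run(String[] args) throws Exception {
    // With `-fs hdfs://nn.example.com:8020` on the command line this
    // prints that URI; otherwise the fs.defaultFS from core-site.xml.
    System.out.println(FileSystem.getDefaultUri(getConf()));
    return 0;
  }

  public static void main(String[] args) throws Exception {
    System.exit(ToolRunner.run(new FsOptionDemo(), args));
  }
}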

http://git-wip-us.apache.org/repos/asf/hadoop/blob/66fa34c8/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index c13399b..20b4c6f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -63,10 +63,9 @@ public class 

[44/49] hadoop git commit: HDFS-10550. DiskBalancer: fix issue of order dependency in iteration in ReportCommand test. Contributed by Xiaobing Zhou.

2016-06-23 Thread arp
HDFS-10550. DiskBalancer: fix issue of order dependency in iteration in 
ReportCommand test. Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7b23ad1e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7b23ad1e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7b23ad1e

Branch: refs/heads/HDFS-1312
Commit: 7b23ad1ef76739bc04374aefe6e8e88c23449653
Parents: c6ed548
Author: Anu Engineer 
Authored: Tue Jun 21 17:34:58 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:27:01 2016 -0700

--
 .../diskbalancer/command/ReportCommand.java | 17 +++--
 .../command/TestDiskBalancerCommand.java| 74 +---
 2 files changed, 44 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b23ad1e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
index eb6afcc..40729f8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java
@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command;
 
 import java.io.PrintStream;
 import java.util.Collections;
+import java.util.List;
 import java.util.ListIterator;
 
 import org.apache.commons.cli.CommandLine;
@@ -32,6 +33,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSe
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Lists;
 
 /**
  * Executes the report command.
@@ -164,9 +166,10 @@ public class ReportCommand extends Command {
             dbdn.getVolumeCount(),
             dbdn.getNodeDataDensity()));
 
+        List<String> volumeList = Lists.newArrayList();
         for (DiskBalancerVolumeSet vset : dbdn.getVolumeSets().values()) {
           for (DiskBalancerVolume vol : vset.getVolumes()) {
-            result.appendln(String.format(volumeFormat,
+            volumeList.add(String.format(volumeFormat,
                 vol.getStorageType(),
                 vol.getPath(),
                 vol.getUsedRatio(),
@@ -181,6 +184,10 @@ public class ReportCommand extends Command {
                 vol.isTransient() ? trueStr : falseStr));
           }
         }
+
+        Collections.sort(volumeList);
+        result.appendln(
+            StringUtils.join(volumeList.toArray(), System.lineSeparator()));
       }
     }
   }
@@ -194,13 +201,13 @@ public class ReportCommand extends Command {
 " datanode, or prints out the list of nodes that will benefit from " +
 "running disk balancer. Top defaults to " + getDefaultTop();
 String footer = ". E.g.:\n"
-+ "hdfs diskbalancer -uri http://namenode.uri -report\n"
-+ "hdfs diskbalancer -uri http://namenode.uri -report -top 5\n"
-+ "hdfs diskbalancer -uri http://namenode.uri -report "
++ "hdfs diskbalancer -fs http://namenode.uri -report\n"
++ "hdfs diskbalancer -fs http://namenode.uri -report -top 5\n"
++ "hdfs diskbalancer -fs http://namenode.uri -report "
 + "-node {DataNodeID | IP | Hostname}";
 
 HelpFormatter helpFormatter = new HelpFormatter();
-helpFormatter.printHelp("hdfs diskbalancer -uri http://namenode.uri " +
+helpFormatter.printHelp("hdfs diskbalancer -fs http://namenode.uri " +
 "-report [options]",
 header, DiskBalancer.getReportOptions(), footer);
   }
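The gist of the fix, restated standalone (names invented): HashMap iteration order is unspecified, so rows are collected and sorted before printing, which is what makes the report output, and therefore the test, deterministic.

import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

class SortedReportDemo {
  public static void main(String[] args) {
    Map<String, Double> usedByVolume = new HashMap<>();
    usedByVolume.put("/data/disk2", 0.71);
    usedByVolume.put("/data/disk1", 0.35);

    List<String> rows = new ArrayList<>();
    for (Map.Entry<String, Double> e : usedByVolume.entrySet()) {
      rows.add(String.format("%-12s used=%.2f", e.getKey(), e.getValue()));
    }
    Collections.sort(rows);          // stable output regardless of map order
    System.out.println(String.join(System.lineSeparator(), rows));
  }
}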

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7b23ad1e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index 57e59f6..3accbc2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -71,8 +71,10 

[36/49] hadoop git commit: HDFS-10496. DiskBalancer: ExecuteCommand checks planFile in a wrong way. Contributed by Lei (Eddy) Xu.

2016-06-23 Thread arp
HDFS-10496. DiskBalancer: ExecuteCommand checks planFile in a wrong way. 
Contributed by Lei (Eddy) Xu.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d2ff793a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d2ff793a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d2ff793a

Branch: refs/heads/HDFS-1312
Commit: d2ff793ae4788418116b64e7141e80897d4f9c24
Parents: 64ccb23
Author: Anu Engineer 
Authored: Tue Jun 7 15:10:22 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java| 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d2ff793a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
index c17ef00..6d30e86 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
@@ -61,7 +61,7 @@ public class ExecuteCommand extends Command {
     verifyCommandOptions(DiskBalancer.EXECUTE, cmd);
 
     String planFile = cmd.getOptionValue(DiskBalancer.EXECUTE);
-    Preconditions.checkArgument(planFile == null || planFile.isEmpty(),
+    Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
         "Invalid plan file specified.");
 
     String planData = null;
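What the one-line fix changes, in isolation (hypothetical helper): Guava's checkArgument throws when its condition is false, so the guard has to assert the valid case rather than the invalid one.

import com.google.common.base.Preconditions;

class PreconditionDemo {
  static void requirePlanFile(String planFile) {
    // Before the fix the condition was inverted
    // (planFile == null || planFile.isEmpty()), which rejected every
    // valid plan file and accepted a missing one.
    Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
        "Invalid plan file specified.");
  }

  public static void main(String[] args) {
    requirePlanFile("/tmp/plan.json");   // passes
    requirePlanFile("");                 // throws IllegalArgumentException
  }
}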


[22/49] hadoop git commit: HDFS-9856. Suppress Jenkins warning for sample JSON file. Contributed by Xiaobing Zhou

2016-06-23 Thread arp
HDFS-9856. Suppress Jenkins warning for sample JSON file. Contributed by 
Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3df0781a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3df0781a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3df0781a

Branch: refs/heads/HDFS-1312
Commit: 3df0781aa7dceadfdd3434ff81c47e73d9da008a
Parents: 9847640
Author: Anu Engineer 
Authored: Thu Mar 3 21:27:57 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/pom.xml | 2 ++
 1 file changed, 2 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3df0781a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 3696797..93e7392 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -396,6 +396,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <exclude>src/main/webapps/static/json-bignum.js</exclude>
             <exclude>src/main/webapps/static/dataTables.bootstrap.css</exclude>
             <exclude>src/main/webapps/static/dataTables.bootstrap.js</exclude>
+            <exclude>src/test/resources/diskBalancer/data-cluster-3node-3disk.json</exclude>
+            <exclude>HDFS-1312_CHANGES.txt</exclude>
           </excludes>
 
   


[17/49] hadoop git commit: HDFS-8821. Stop tracking CHANGES.txt in the HDFS-1312 feature branch. Contributed by Xiaobing Zhou

2016-06-23 Thread arp
HDFS-8821. Stop tracking CHANGES.txt in the HDFS-1312 feature branch. 
Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/4b93ddae
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/4b93ddae
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/4b93ddae

Branch: refs/heads/HDFS-1312
Commit: 4b93ddae07ba4332f40f896542ee2c6d7bf899ed
Parents: 747227e
Author: Anu Engineer 
Authored: Tue Mar 8 20:35:59 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt   | 35 
 hadoop-hdfs-project/hadoop-hdfs/pom.xml |  1 -
 2 files changed, 36 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b93ddae/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
deleted file mode 100644
index 919d73e..000
--- a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-HDFS-1312 Change Log
-
-  NEW FEATURES
-
-HDFS-9420. Add DataModels for DiskBalancer. (Anu Engineer via szetszwo)
-
-HDFS-9449. DiskBalancer: Add connectors. (Anu Engineer via szetszwo)
-
-HDFS-9526. Fix jackson annotation imports. (Xiaobing Zhou via szetszwo)
-
-HDFS-9611. DiskBalancer: Replace htrace json imports with jackson.
-(Anu Engineer via Arpit Agarwal)
-
-HDFS-9469. DiskBalancer: Add Planner. (Anu Engineer via Arpit Agarwal)
-
-HDFS-9588. DiskBalancer: Add submitDiskbalancer RPC. (Anu Engineer via
-Arpit Agarwal)
-
-HDFS-9595. DiskBalancer: Add cancelPlan RPC. (Anu Engineer via
-Arpit Agarwal)
-
-HDFS-9645. DiskBalancer: Add Query RPC. (Anu Engineer via Arpit Agarwal)
-
-HDFS-9647. DiskBalancer: Add getRuntimeSettings. (Anu Engineer via
-Arpit Agarwal)
-
-HDFS-9671. DiskBalancer: SubmitPlan implementation. (Anu Engineer via
-Arpit Agarwal)
-
-HDFS-9681. DiskBalancer: Add QueryPlan implementation. (Anu Engineer via
-Arpit Agarwal)
-
-HDFS-9683. DiskBalancer: Add cancelPlan implementation. (Anu Engineer via
-Arpit Agarwal)
-

http://git-wip-us.apache.org/repos/asf/hadoop/blob/4b93ddae/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
index 93e7392..e8397e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml
@@ -397,7 +397,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
             <exclude>src/main/webapps/static/dataTables.bootstrap.css</exclude>
             <exclude>src/main/webapps/static/dataTables.bootstrap.js</exclude>
             <exclude>src/test/resources/diskBalancer/data-cluster-3node-3disk.json</exclude>
-            <exclude>HDFS-1312_CHANGES.txt</exclude>
           </excludes>
 
   


[20/49] hadoop git commit: HDFS-9681. DiskBalancer: Add QueryPlan implementation. (Contributed by Anu Engineer)

2016-06-23 Thread arp
HDFS-9681. DiskBalancer: Add QueryPlan implementation. (Contributed by Anu 
Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e646c2eb
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e646c2eb
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e646c2eb

Branch: refs/heads/HDFS-1312
Commit: e646c2eb50b9ae2b0b084d78a4ea68e106804321
Parents: 2b1b2fa
Author: Arpit Agarwal 
Authored: Wed Feb 24 16:49:30 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../ClientDatanodeProtocolTranslatorPB.java |  11 +-
 .../server/datanode/DiskBalancerWorkStatus.java | 194 +--
 .../src/main/proto/ClientDatanodeProtocol.proto |   5 +-
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt   |   5 +-
 ...tDatanodeProtocolServerSideTranslatorPB.java |   5 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |   8 +-
 .../hdfs/server/datanode/DiskBalancer.java  |  39 
 .../diskbalancer/TestDiskBalancerRPC.java   |  26 ++-
 8 files changed, 249 insertions(+), 44 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e646c2eb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 786d834..7076026 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -60,6 +60,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBa
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
+import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
@@ -392,10 +393,14 @@ public class ClientDatanodeProtocolTranslatorPB implements
       QueryPlanStatusRequestProto.newBuilder().build();
       QueryPlanStatusResponseProto response =
           rpcProxy.queryDiskBalancerPlan(NULL_CONTROLLER, request);
-      return new DiskBalancerWorkStatus(response.hasResult() ?
-          response.getResult() : 0,
+      DiskBalancerWorkStatus.Result result = Result.NO_PLAN;
+      if(response.hasResult()) {
+        result = DiskBalancerWorkStatus.Result.values()[
+            response.getResult()];
+      }
+
+      return new DiskBalancerWorkStatus(result,
           response.hasPlanID() ? response.getPlanID() : null,
-          response.hasStatus() ? response.getStatus() : null,
           response.hasCurrentStatus() ? response.getCurrentStatus() : null);
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
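The decoding idiom above, restated in isolation (the Result names beyond NO_PLAN are taken from elsewhere on the branch and should be treated as assumptions): the wire carries the enum ordinal as an int, and an absent optional field means NO_PLAN.

class ResultDecodeDemo {
  enum Result { NO_PLAN, PLAN_UNDER_PROGRESS, PLAN_DONE, PLAN_CANCELLED }

  static Result decode(boolean hasResult, int wireValue) {
    // Maps the proto int back through values(); defaults when absent.
    return hasResult ? Result.values()[wireValue] : Result.NO_PLAN;
  }

  public static void main(String[] args) {
    System.out.println(decode(true, 1));   // PLAN_UNDER_PROGRESS
    System.out.println(decode(false, 0));  // NO_PLAN
  }
}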

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e646c2eb/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
index 6b29ce8..d6943cf 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
@@ -19,8 +19,17 @@
 
 package org.apache.hadoop.hdfs.server.datanode;
 
+
+import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.codehaus.jackson.map.ObjectMapper;
+
+import static org.codehaus.jackson.map.type.TypeFactory.defaultInstance;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.LinkedList;
 
 /**
  * Helper class that reports how much work has been done by the node.
@@ -28,33 +37,69 @@ import 

[02/49] hadoop git commit: HDFS-9720. DiskBalancer : Add configuration parameters. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-9720. DiskBalancer : Add configuration parameters. Contributed by Anu 
Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/05067707
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/05067707
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/05067707

Branch: refs/heads/HDFS-1312
Commit: 050677077beaf42255b3936952b8e816a9201203
Parents: 6c606bf
Author: Anu Engineer 
Authored: Tue Apr 5 12:23:35 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   |  4 +-
 .../ClientDatanodeProtocolTranslatorPB.java |  8 +-
 .../server/datanode/DiskBalancerWorkItem.java   | 77 +++
 .../src/main/proto/ClientDatanodeProtocol.proto |  2 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   | 15 
 ...tDatanodeProtocolServerSideTranslatorPB.java |  6 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  4 +-
 .../hdfs/server/datanode/DiskBalancer.java  | 81 +++-
 .../server/diskbalancer/planner/MoveStep.java   | 75 ++
 .../hdfs/server/diskbalancer/planner/Step.java  | 23 ++
 .../diskbalancer/TestDiskBalancerRPC.java   | 31 
 .../TestDiskBalancerWithMockMover.java  | 37 -
 .../hdfs/server/diskbalancer/TestPlanner.java   | 29 ---
 13 files changed, 328 insertions(+), 64 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/05067707/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index d8df7fb..3993ce5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -168,8 +168,8 @@ public interface ClientDatanodeProtocol {
   /**
    * Submit a disk balancer plan for execution.
    */
-  void submitDiskBalancerPlan(String planID, long planVersion, long bandwidth,
-      String plan) throws IOException;
+  void submitDiskBalancerPlan(String planID, long planVersion, String plan,
+      boolean skipDateCheck) throws IOException;
 
   /**
* Cancel an executing plan.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/05067707/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 7076026..4f314e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -342,22 +342,20 @@ public class ClientDatanodeProtocolTranslatorPB implements
    *   local copies of these plans.
    * @param planVersion - The data format of the plans - for future , not
    *                      used now.
-   * @param bandwidth - Maximum disk bandwidth to consume, setting this value
-   *                    to zero allows datanode to use the value defined in
-   *                    configration.
    * @param plan - Actual plan.
+   * @param skipDateCheck - Skips the date check.
    * @throws IOException
    */
   @Override
   public void submitDiskBalancerPlan(String planID, long planVersion,
-      long bandwidth, String plan) throws IOException {
+      String plan, boolean skipDateCheck) throws IOException {
     try {
       SubmitDiskBalancerPlanRequestProto request =
           SubmitDiskBalancerPlanRequestProto.newBuilder()
               .setPlanID(planID)
               .setPlanVersion(planVersion)
-              .setMaxDiskBandwidth(bandwidth)
               .setPlan(plan)
+              .setIgnoreDateCheck(skipDateCheck)
               .build();
       rpcProxy.submitDiskBalancerPlan(NULL_CONTROLLER, request);
     } catch (ServiceException e) {
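The email is cut off here; for completeness, a call-site sketch for the new signature, mirroring the cancel sketch earlier (proxy construction omitted; `planJson` is the serialized plan, and version 1 is the only plan version today):

import org.apache.commons.codec.digest.DigestUtils;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;

import java.io.IOException;

final class SubmitSketch {
  static void submit(ClientDatanodeProtocol dnProxy, String planJson)
      throws IOException {
    // planID stays a SHA512 hash of the plan; bandwidth no longer rides
    // on the RPC and is read from configuration on the datanode side.
    String planID = DigestUtils.sha512Hex(planJson);
    dnProxy.submitDiskBalancerPlan(planID, 1L, planJson, false);
  }
}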


[23/49] hadoop git commit: HDFS-9543. DiskBalancer: Add Data mover. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-9543. DiskBalancer: Add Data mover. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1594b472
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1594b472
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1594b472

Branch: refs/heads/HDFS-1312
Commit: 1594b472bb9df7537dbc001411c99058cc11ba41
Parents: 7820737
Author: Anu Engineer 
Authored: Thu Apr 28 16:12:04 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:20:24 2016 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |   2 -
 .../hdfs/server/datanode/DiskBalancer.java  | 365 +--
 .../datamodel/DiskBalancerDataNode.java |  13 +-
 .../datamodel/DiskBalancerVolume.java   |   6 +-
 .../datamodel/DiskBalancerVolumeSet.java|  34 +-
 .../server/diskbalancer/planner/MoveStep.java   |  14 +-
 .../hdfs/server/diskbalancer/planner/Step.java  |  20 +-
 .../hdfs/server/balancer/TestBalancer.java  |   3 +-
 .../server/diskbalancer/TestDiskBalancer.java   | 247 +
 .../hdfs/server/diskbalancer/TestPlanner.java   |  28 +-
 10 files changed, 666 insertions(+), 66 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1594b472/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 776da3a..d6be2e0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -3324,8 +3324,6 @@ public class DataNode extends ReconfigurableBase
    * @param planID  - Hash value of the plan.
    * @param planVersion - Plan version, reserved for future use. We have only
    *                      version 1 now.
-   * @param bandwidth - Max disk bandwidth to use, 0 means use value defined
-   *                    in the configration.
    * @param plan - Actual plan
    * @throws IOException
    */

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1594b472/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index b62a4fc..7f768ea 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -23,7 +23,9 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.DiskBalancerWorkEntry;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus
+.DiskBalancerWorkEntry;
 import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus.Result;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
@@ -39,6 +41,8 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.nio.charset.Charset;
 import java.util.HashMap;
+import java.util.List;
+import java.util.LinkedList;
 import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.ExecutorService;
@@ -48,18 +52,21 @@ import java.util.concurrent.TimeUnit;
 import java.util.concurrent.atomic.AtomicBoolean;
 import java.util.concurrent.locks.ReentrantLock;
 
+import static java.util.concurrent.TimeUnit.MILLISECONDS;
+import static java.util.concurrent.TimeUnit.SECONDS;
+
 /**
  * Worker class for Disk Balancer.
- * 
+ * 
  * Here is the high level logic executed by this class. Users can submit disk
 * balancing plans using submitPlan calls. After a set of sanity checks the plan
  * is admitted and put into workMap.
- * 
+ * 
  * The executePlan launches a thread that picks up work from workMap and hands
  * it over to the BlockMover#copyBlocks function.
- * 
+ * 
  * Constraints :
- * 
+ * 
 * Only one plan can be executing in a datanode at any given time.

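The class javadoc above compresses the data mover's control flow into three sentences; the sketch below restates that submit-admit-execute loop under stated assumptions. The field types and payloads here are simplifications, not the real Hadoop classes, and the real DiskBalancer adds locking, bandwidth limits, and error accounting.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

class DiskBalancerFlowSketch {
  // workMap holds admitted plans; a single-threaded executor drains it.
  private final Map<String, String> workMap = new ConcurrentHashMap<>();
  private final ExecutorService scheduler = Executors.newSingleThreadExecutor();

  // submitPlan: sanity-check the plan, then admit it into workMap.
  void submitPlan(String planId, String planJson) {
    if (planJson == null || planJson.isEmpty()) {
      throw new IllegalArgumentException("empty plan"); // one sanity check
    }
    workMap.put(planId, planJson);
    executePlan(planId);
  }

  // executePlan: the worker thread picks up the admitted plan and hands it
  // to the block mover (BlockMover#copyBlocks in the real class).
  private void executePlan(final String planId) {
    scheduler.submit(new Runnable() {
      @Override
      public void run() {
        copyBlocks(workMap.get(planId));
      }
    });
  }

  private void copyBlocks(String planJson) {
    // move replicas between volumes per the plan (elided).
  }
}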
[21/49] hadoop git commit: HDFS-9817. Use SLF4J in new classes. Contributed by Anu Engineer

2016-06-23 Thread arp
HDFS-9817. Use SLF4J in new classes. Contributed by Anu Engineer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/747227e9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/747227e9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/747227e9

Branch: refs/heads/HDFS-1312
Commit: 747227e9dea10ac6b5f601b7cf4dcc418b10d9c8
Parents: 3df0781
Author: Anu Engineer 
Authored: Fri Mar 4 20:16:13 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hdfs/server/datanode/DiskBalancer.java  |  7 +++---
 .../connectors/ConnectorFactory.java| 15 +++--
 .../connectors/DBNameNodeConnector.java |  9 
 .../connectors/JsonNodeConnector.java   |  7 +++---
 .../datamodel/DiskBalancerCluster.java  | 11 +-
 .../datamodel/DiskBalancerVolumeSet.java|  9 
 .../diskbalancer/planner/GreedyPlanner.java | 23 ++--
 .../diskbalancer/planner/PlannerFactory.java|  7 +++---
 8 files changed, 47 insertions(+), 41 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/747227e9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 81dbb2d..d5c402e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -20,8 +20,8 @@ package org.apache.hadoop.hdfs.server.datanode;
 
 import com.google.common.base.Preconditions;
 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -60,7 +60,8 @@ import java.util.concurrent.locks.ReentrantLock;
 @InterfaceAudience.Private
 public class DiskBalancer {
 
-  private static final Log LOG = LogFactory.getLog(DiskBalancer.class);
+  private static final Logger LOG = LoggerFactory.getLogger(DiskBalancer
+  .class);
  private final FsDatasetSpi<?> dataset;
   private final String dataNodeUUID;
   private final BlockMover blockMover;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/747227e9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
index 040923a..484a64b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/ConnectorFactory.java
@@ -16,8 +16,8 @@
  */
 package org.apache.hadoop.hdfs.server.diskbalancer.connectors;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
 import org.apache.hadoop.conf.Configuration;
 
 import java.io.IOException;
@@ -28,7 +28,8 @@ import java.net.URISyntaxException;
  * Connector factory creates appropriate connector based on the URL.
  */
 public final class ConnectorFactory {
-  static final Log LOG = LogFactory.getLog(ConnectorFactory.class);
+  private static final Logger LOG =
+  LoggerFactory.getLogger(ConnectorFactory.class);
 
   /**
* Constructs an appropriate connector based on the URL.
@@ -37,13 +38,13 @@ public final class ConnectorFactory {
*/
   public static ClusterConnector getCluster(URI clusterURI, Configuration
   conf) throws IOException, URISyntaxException {
-LOG.info("Cluster URI : " + clusterURI);
-LOG.info("scheme : " + clusterURI.getScheme());
+LOG.debug("Cluster URI : {}" , clusterURI);
+LOG.debug("scheme : {}" , clusterURI.getScheme());
 if (clusterURI.getScheme().startsWith("file")) {
-  LOG.info("Creating a JsonNodeConnector");
+  LOG.debug("Creating a JsonNodeConnector");
  return new JsonNodeConnector(clusterURI.toURL());

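The value of this migration is visible in the hunk above: with commons-logging, every call builds its message string eagerly, while SLF4J's {} placeholders defer formatting until the level is actually enabled. A small before/after sketch:

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

class LoggingStyleSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LoggingStyleSketch.class);

  void example(java.net.URI clusterURI) {
    // commons-logging style: concatenation runs even when DEBUG is off.
    //   LOG.debug("Cluster URI : " + clusterURI);

    // SLF4J style: the placeholder is filled only when DEBUG is enabled.
    LOG.debug("Cluster URI : {}", clusterURI);
  }
}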
[16/49] hadoop git commit: HDFS-9588. DiskBalancer: Add submitDiskbalancer RPC. (Anu Engineer via Arpit Agarwal)

2016-06-23 Thread arp
HDFS-9588. DiskBalancer: Add submitDiskbalancer RPC. (Anu Engineer via Arpit 
Agarwal)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7100c0da
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7100c0da
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7100c0da

Branch: refs/heads/HDFS-1312
Commit: 7100c0da353d0960d3db71b029a36247838b24c6
Parents: 5724a10
Author: Arpit Agarwal 
Authored: Mon Jan 11 20:31:18 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   |  7 ++
 .../ClientDatanodeProtocolTranslatorPB.java | 32 +++
 .../src/main/proto/ClientDatanodeProtocol.proto | 23 ++
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt   |  3 +
 ...tDatanodeProtocolServerSideTranslatorPB.java | 27 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   | 24 ++
 .../diskbalancer/DiskbalancerException.java | 86 +++
 .../diskbalancer/TestDiskBalancerRPC.java   | 87 
 8 files changed, 289 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7100c0da/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index e541388..6e9cef0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -163,4 +163,11 @@ public interface ClientDatanodeProtocol {
* @return balancer bandwidth
*/
   long getBalancerBandwidth() throws IOException;
+
+  /**
+   * Submit a disk balancer plan for execution.
+   */
+  void submitDiskBalancerPlan(String planID, long planVersion, long bandwidth,
+  String plan) throws IOException;
+
 }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/7100c0da/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index 6aaa025..da8d962 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.GetRe
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.ShutdownDatanodeRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.StartReconfigurationRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
@@ -326,4 +327,35 @@ public class ClientDatanodeProtocolTranslatorPB implements
   throw ProtobufHelper.getRemoteException(e);
 }
   }
+
+  /**
+   * Submits a disk balancer plan to the datanode.
+   * @param planID - Plan ID is the hash512 string of the plan that is
+   *   submitted. This is used by clients when they want to find
+   *   local copies of these plans.
+   * @param planVersion - The data format of the plans - reserved for future
+   *use; not used now.
+   * @param bandwidth - Maximum disk bandwidth to consume, setting this value
+   *  to zero allows datanode to use the value defined in
+   *  configuration.
+   * @param plan - Actual plan.
+   * @return Success or throws Exception.
+   * @throws Exception
+   */
+  @Override
+  public void submitDiskBalancerPlan(String planID, long planVersion,
+  long bandwidth, String plan) throws IOException {
+try {
+  SubmitDiskBalancerPlanRequestProto request =
+  SubmitDiskBalancerPlanRequestProto.newBuilder()
+  .setPlanID(planID)
+  .setPlanVersion(planVersion)
+  .setMaxDiskBandwidth(bandwidth)
+  .setPlan(plan)
+  .build();

[47/49] hadoop git commit: HDFS-10552. DiskBalancer "-query" results in NPE if no plan for the node. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10552. DiskBalancer "-query" results in NPE if no plan for the node. 
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a6e3541
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a6e3541
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a6e3541

Branch: refs/heads/HDFS-1312
Commit: 8a6e3541226fb1b6798cedecc56f1f160012becf
Parents: e8de281
Author: Anu Engineer 
Authored: Wed Jun 22 17:35:55 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:27:02 2016 -0700

--
 .../hdfs/server/datanode/DiskBalancer.java  |  5 +++-
 .../diskbalancer/command/QueryCommand.java  |  2 +-
 .../command/TestDiskBalancerCommand.java| 25 
 3 files changed, 30 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6e3541/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index b31b997..5a1fb9e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -104,6 +104,7 @@ public class DiskBalancer {
 scheduler = Executors.newSingleThreadExecutor();
 lock = new ReentrantLock();
 workMap = new ConcurrentHashMap<>();
+this.planID = "";  // to keep protobuf happy.
 this.isDiskBalancerEnabled = conf.getBoolean(
 DFSConfigKeys.DFS_DISK_BALANCER_ENABLED,
 DFSConfigKeys.DFS_DISK_BALANCER_ENABLED_DEFAULT);
@@ -223,7 +224,9 @@ public class DiskBalancer {
 lock.lock();
 try {
   checkDiskBalancerEnabled();
-  if ((this.planID == null) || (!this.planID.equals(planID))) {
+  if (this.planID == null ||
+  !this.planID.equals(planID) ||
+  this.planID.isEmpty()) {
 LOG.error("Disk Balancer - No such plan. Cancel plan failed. PlanID: " 
+
 planID);
 throw new DiskBalancerException("No such plan.",

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6e3541/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
index 6c759e2..fac1e51 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -63,7 +63,7 @@ public class QueryCommand extends Command {
 String nodeAddress = nodeName;
 
 // if the string is not name:port format use the default port.
-if (!nodeName.matches("^.*:\\d$")) {
+if (!nodeName.matches("[^\\:]+:[0-9]{2,5}")) {
   int defaultIPC = NetUtils.createSocketAddr(
   getConf().getTrimmed(DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_KEY,
   DFSConfigKeys.DFS_DATANODE_IPC_ADDRESS_DEFAULT)).getPort();

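The regex change above is easy to miss: the old pattern ^.*:\d$ anchored on a single trailing digit, so a normal address such as datanode1:50020 fell through to the default-port branch even though it already carried a port. A small standalone check:

public class PortPatternCheck {
  public static void main(String[] args) {
    String node = "datanode1:50020";
    // Old pattern: only matches addresses ending in exactly one digit.
    System.out.println(node.matches("^.*:\\d$"));           // false
    // New pattern: host part, colon, then a 2-5 digit port.
    System.out.println(node.matches("[^\\:]+:[0-9]{2,5}")); // true
  }
}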
http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a6e3541/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index ceb762f..b0821e2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -35,6 +35,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import 

[19/49] hadoop git commit: HDFS-9671. DiskBalancer: SubmitPlan implementation. (Contributed by Anu Engineer)

2016-06-23 Thread arp
HDFS-9671. DiskBalancer: SubmitPlan implementation. (Contributed by Anu 
Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2b1b2faf
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2b1b2faf
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2b1b2faf

Branch: refs/heads/HDFS-1312
Commit: 2b1b2faf76a7ff148650a7836935a85439f60c49
Parents: 66f0bb6
Author: Arpit Agarwal 
Authored: Mon Feb 22 11:45:51 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   |   4 +-
 .../ClientDatanodeProtocolTranslatorPB.java |  10 +-
 .../server/datanode/DiskBalancerWorkItem.java   | 160 ++
 .../server/datanode/DiskBalancerWorkStatus.java |  87 +++
 .../hadoop/hdfs/server/datanode/WorkStatus.java |  85 ---
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt   |   7 +-
 .../org/apache/hadoop/hdfs/DFSConfigKeys.java   |   5 +
 ...tDatanodeProtocolServerSideTranslatorPB.java |   4 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   |  56 +-
 .../hdfs/server/datanode/DiskBalancer.java  | 542 +++
 .../diskbalancer/DiskBalancerConstants.java |   9 +
 .../diskbalancer/DiskBalancerException.java |  98 
 .../diskbalancer/DiskbalancerException.java |  86 ---
 .../datamodel/DiskBalancerCluster.java  |  14 +
 .../diskbalancer/TestDiskBalancerRPC.java   |  28 +-
 15 files changed, 984 insertions(+), 211 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b1b2faf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index dede89e..d8df7fb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;
-import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
+import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
 
 /** An client-datanode protocol for block recovery
  */
@@ -182,7 +182,7 @@ public interface ClientDatanodeProtocol {
   /**
* Gets the status of an executing diskbalancer Plan.
*/
-  WorkStatus queryDiskBalancerPlan() throws IOException;
+  DiskBalancerWorkStatus queryDiskBalancerPlan() throws IOException;
 
   /**
* Gets a run-time configuration value from running diskbalancer instance.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/2b1b2faf/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index e7e0d94..786d834 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -59,7 +59,7 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryP
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingRequestProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.DiskBalancerSettingResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
-import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
+import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
@@ -345,8 +345,7 @@ public class ClientDatanodeProtocolTranslatorPB implements
*  to zero allows datanode to use the value defined in
*  configuration.
* @param plan - Actual plan.
-   * @return Success or throws Exception.

[39/49] hadoop git commit: HDFS-10399. DiskBalancer: Add JMX for DiskBalancer. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10399. DiskBalancer: Add JMX for DiskBalancer. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5df2d2b8
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5df2d2b8
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5df2d2b8

Branch: refs/heads/HDFS-1312
Commit: 5df2d2b8fd2932fb6e931d948fb6620ab7bcf0bb
Parents: 1b39b28
Author: Anu Engineer 
Authored: Fri May 20 08:53:28 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../server/datanode/DiskBalancerWorkItem.java   | 10 ++-
 .../server/datanode/DiskBalancerWorkStatus.java | 30 +++-
 ...tDatanodeProtocolServerSideTranslatorPB.java |  2 +-
 .../hadoop/hdfs/server/datanode/DataNode.java   | 10 +++
 .../hdfs/server/datanode/DataNodeMXBean.java|  8 ++
 .../server/diskbalancer/TestDiskBalancer.java   |  6 
 .../TestDiskBalancerWithMockMover.java  | 22 --
 7 files changed, 77 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5df2d2b8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
index 7381499..fe908d8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
@@ -34,7 +34,7 @@ import java.io.IOException;
 @InterfaceStability.Unstable
 @JsonInclude(JsonInclude.Include.NON_DEFAULT)
 public class DiskBalancerWorkItem {
-  private final long bytesToCopy;
+  private long bytesToCopy;
   private long bytesCopied;
   private long errorCount;
   private String errMsg;
@@ -45,6 +45,14 @@ public class DiskBalancerWorkItem {
   private long bandwidth;
 
   /**
+   * Empty constructor for Json serialization.
+   */
+  public DiskBalancerWorkItem() {
+
+  }
+
+
+  /**
* Constructs a DiskBalancerWorkItem.
*
* @param bytesToCopy - Total bytes to copy from a disk

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5df2d2b8/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
index d6943cf..ca5e5f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
@@ -126,11 +126,29 @@ public class DiskBalancerWorkStatus {
*
* @throws IOException
**/
-  public String getCurrentStateString() throws IOException {
+  public String currentStateString() throws IOException {
 ObjectMapper mapper = new ObjectMapper();
 return mapper.writeValueAsString(currentState);
   }
 
+  public String toJsonString() throws IOException {
+ObjectMapper mapper = new ObjectMapper();
+return mapper.writeValueAsString(this);
+
+  }
+
+  /**
+   * Returns a DiskBalancerWorkStatus object from the Json string.
+   * @param json - json String
+   * @return DiskBalancerWorkStatus
+   * @throws IOException
+   */
+  public static DiskBalancerWorkStatus parseJson(String json) throws
+  IOException {
+ObjectMapper mapper = new ObjectMapper();
+return mapper.readValue(json, DiskBalancerWorkStatus.class);
+  }
+
 
   /**
* Adds a new work entry to the list.
@@ -177,6 +195,16 @@ public class DiskBalancerWorkStatus {
 private DiskBalancerWorkItem workItem;
 
 /**
+ * Constructor needed for json serialization.
+ */
+public DiskBalancerWorkEntry() {
+}
+
+public DiskBalancerWorkEntry(String workItem) throws IOException {
+  this.workItem = DiskBalancerWorkItem.parseJson(workItem);
+}
+
+/**
  * Constructs a Work Entry class.
  *
  * @param sourcePath - Source Path where we are moving data from.

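The toJsonString()/parseJson() pair added above is a plain Jackson 1.x round trip. A hedged, generic sketch of the same pattern, assuming only a bean with getters, setters, and the no-arg constructor this patch adds:

import java.io.IOException;
import org.codehaus.jackson.map.ObjectMapper;

class JsonRoundTripSketch {
  // Serialize and re-read any Jackson-friendly bean, the same way
  // DiskBalancerWorkStatus does it for JMX consumers.
  static <T> T roundTrip(T value, Class<T> type) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    String json = mapper.writeValueAsString(value); // toJsonString() analogue
    return mapper.readValue(json, type);            // parseJson() analogue
  }
}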

[46/49] hadoop git commit: HDFS-10545. DiskBalancer: PlanCommand should use -fs instead of -uri to be consistent with other hdfs commands. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10545. DiskBalancer: PlanCommand should use -fs instead of -uri to be 
consistent with other hdfs commands. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0774412e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0774412e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0774412e

Branch: refs/heads/HDFS-1312
Commit: 0774412e41856b4ed3eccfa9270165e216d10ab8
Parents: 5b8e1c2
Author: Anu Engineer 
Authored: Fri Jun 17 23:31:21 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:27:01 2016 -0700

--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0774412e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
index 67703c4..8a900ba 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
@@ -60,7 +60,7 @@ public class DiskBalancer extends Configured implements Tool {
* 
* hdfs://namenode.uri or file:///data/myCluster.json
*/
-  public static final String NAMENODEURI = "uri";
+  public static final String NAMENODEURI = "fs";
   /**
* Computes a plan for a given set of nodes.
*/

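With this rename the tool is invoked as, for example, hdfs diskbalancer -plan <datanode> -fs hdfs://namenode:8020, matching the -fs convention of other hdfs commands. A hedged sketch of reading such a flag with the commons-cli classes this tool already uses; the option table below is illustrative, not the tool's actual one:

import org.apache.commons.cli.CommandLine;
import org.apache.commons.cli.GnuParser;
import org.apache.commons.cli.Options;

class FsFlagSketch {
  public static void main(String[] args) throws Exception {
    Options opts = new Options();
    opts.addOption("fs", true, "NameNode URI, e.g. hdfs://namenode:8020");
    CommandLine cmd = new GnuParser().parse(opts,
        new String[] {"-fs", "hdfs://namenode:8020"});
    System.out.println(cmd.getOptionValue("fs")); // hdfs://namenode:8020
  }
}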

-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[34/49] hadoop git commit: HDFS-10402. DiskBalancer: Add QueryStatus command. (Contributed by Anu Engineer)

2016-06-23 Thread arp
HDFS-10402. DiskBalancer: Add QueryStatus command. (Contributed by Anu Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9e5fcb5e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9e5fcb5e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9e5fcb5e

Branch: refs/heads/HDFS-1312
Commit: 9e5fcb5e40bb370e4579e6040c02e923c1a90427
Parents: 5df2d2b
Author: Arpit Agarwal 
Authored: Fri May 20 14:09:58 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../diskbalancer/command/QueryCommand.java  | 82 
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  | 22 ++
 2 files changed, 104 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e5fcb5e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
new file mode 100644
index 000..36448b8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/QueryCommand.java
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.server.diskbalancer.command;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.cli.CommandLine;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
+import org.apache.hadoop.hdfs.tools.DiskBalancer;
+
+/**
+ * Gets the current status of disk balancer command.
+ */
+public class QueryCommand extends Command {
+
+  /**
+   * Constructs QueryCommand.
+   *
+   * @param conf - Configuration.
+   */
+  public QueryCommand(Configuration conf) {
+super(conf);
+addValidCommandParameters(DiskBalancer.QUERY, "Queries the status of disk" +
+" plan running on a given datanode.");
+addValidCommandParameters(DiskBalancer.VERBOSE, "Prints verbose results.");
+  }
+  /**
+   * Executes the Client Calls.
+   *
+   * @param cmd - CommandLine
+   */
+  @Override
+  public void execute(CommandLine cmd) throws Exception {
+LOG.info("Executing \"query plan\" command.");
+Preconditions.checkState(cmd.hasOption(DiskBalancer.QUERY));
+verifyCommandOptions(DiskBalancer.QUERY, cmd);
+String nodeName = cmd.getOptionValue(DiskBalancer.QUERY);
+Preconditions.checkNotNull(nodeName);
+ClientDatanodeProtocol dataNode = getDataNodeProxy(nodeName);
+try {
+  DiskBalancerWorkStatus workStatus = dataNode.queryDiskBalancerPlan();
+  System.out.printf("Plan ID: %s Result: %s%n", workStatus.getPlanID(),
+  workStatus.getResult().toString());
+
+  if(cmd.hasOption(DiskBalancer.VERBOSE)) {
+System.out.printf("%s", workStatus.currentStateString());
+  }
+} catch (DiskBalancerException ex) {
+  LOG.error("Query plan failed. ex: {}", ex);
+  throw ex;
+}
+  }
+
+  /**
+   * Gets extended help for this command.
+   *
+   * @return Help Message
+   */
+  @Override
+  protected String getHelp() {
+return "Gets the status of disk balancing on a given node";
+  }
+}

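From the command line this surfaces as hdfs diskbalancer -query <datanode:port>, with -v for the verbose state dump. Programmatically, the heart of execute() above reduces to the following hedged sketch; obtaining the datanode proxy is left out because it goes through the command's own getDataNodeProxy() helper:

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.datanode.DiskBalancerWorkStatus;

class QuerySketch {
  static void printStatus(ClientDatanodeProtocol dataNode) throws IOException {
    DiskBalancerWorkStatus status = dataNode.queryDiskBalancerPlan();
    // Same fields the command prints: plan hash and execution result.
    System.out.printf("Plan ID: %s Result: %s%n",
        status.getPlanID(), status.getResult());
  }
}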
http://git-wip-us.apache.org/repos/asf/hadoop/blob/9e5fcb5e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
index 

[30/49] hadoop git commit: HDFS-10518. DiskBalancer: Pretty-print json in Query command. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10518. DiskBalancer: Pretty-print json in Query command. Contributed by 
Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7e2be5c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7e2be5c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7e2be5c4

Branch: refs/heads/HDFS-1312
Commit: 7e2be5c4a0b68b556ec6afcb0e14e0ab5ef1a9b2
Parents: af11ab3
Author: Anu Engineer 
Authored: Mon Jun 13 14:11:23 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../server/datanode/DiskBalancerWorkItem.java   | 42 
 .../server/datanode/DiskBalancerWorkStatus.java |  2 +
 .../hdfs/server/datanode/DiskBalancer.java  | 53 ++--
 .../diskbalancer/command/QueryCommand.java  |  5 +-
 .../TestDiskBalancerWithMockMover.java  | 20 
 5 files changed, 116 insertions(+), 6 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e2be5c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
index fe908d8..f46a987 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkItem.java
@@ -34,6 +34,8 @@ import java.io.IOException;
 @InterfaceStability.Unstable
 @JsonInclude(JsonInclude.Include.NON_DEFAULT)
 public class DiskBalancerWorkItem {
+  private long startTime;
+  private long secondsElapsed;
   private long bytesToCopy;
   private long bytesCopied;
   private long errorCount;
@@ -242,4 +244,44 @@ public class DiskBalancerWorkItem {
   public void setBandwidth(long bandwidth) {
 this.bandwidth = bandwidth;
   }
+
+
+  /**
+   * Records the Start time of execution.
+   * @return startTime
+   */
+  public long getStartTime() {
+return startTime;
+  }
+
+  /**
+   * Sets the Start time.
+   * @param startTime  - Time stamp for start of execution.
+   */
+  public void setStartTime(long startTime) {
+this.startTime = startTime;
+  }
+
+  /**
+   * Gets the number of seconds elapsed from the start time.
+   *
+   * The reason why we have this is time skew. The client's current time
+   * may not match the server time stamp, hence elapsed seconds
+   * cannot be computed from startTime alone.
+   *
+   * @return seconds elapsed from start time.
+   */
+  public long getSecondsElapsed() {
+return secondsElapsed;
+  }
+
+  /**
+   * Sets number of seconds elapsed.
+   *
+   * This is updated whenever we update the other counters.
+   * @param secondsElapsed  - seconds elapsed.
+   */
+  public void setSecondsElapsed(long secondsElapsed) {
+this.secondsElapsed = secondsElapsed;
+  }
 }
\ No newline at end of file

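The time-skew argument in the javadoc above is worth spelling out: the datanode computes secondsElapsed against its own clock whenever it updates the other counters, so a client never has to subtract a remote startTime from its local, possibly skewed, clock. A sketch of that server-side bookkeeping, with illustrative field handling:

class ElapsedTimeSketch {
  private long startTime;      // server-side start, ms since the epoch
  private long secondsElapsed; // maintained alongside the other counters

  void onCountersUpdated() {
    // Both timestamps come from the same (server) clock, so skew between
    // client and server never enters the calculation.
    secondsElapsed = (System.currentTimeMillis() - startTime) / 1000;
  }
}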
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7e2be5c4/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
index ca5e5f0..1f62f47 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
+++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancerWorkStatus.java
@@ -24,6 +24,7 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.codehaus.jackson.map.ObjectMapper;
+import org.codehaus.jackson.map.SerializationConfig;
 
 import static org.codehaus.jackson.map.type.TypeFactory.defaultInstance;
 
@@ -128,6 +129,7 @@ public class DiskBalancerWorkStatus {
**/
   public String currentStateString() throws IOException {
 ObjectMapper mapper = new ObjectMapper();
+mapper.enable(SerializationConfig.Feature.INDENT_OUTPUT);
 return mapper.writeValueAsString(currentState);
   }
 

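That single enable() call is the whole pretty-printing change; in Jackson 1.x the feature lives on SerializationConfig. A minimal sketch of the same idiom, assuming any serializable state object:

import java.io.IOException;
import org.codehaus.jackson.map.ObjectMapper;
import org.codehaus.jackson.map.SerializationConfig;

class PrettyJsonSketch {
  static String pretty(Object state) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    // Indented output instead of one dense line, as -query -v now prints.
    mapper.enable(SerializationConfig.Feature.INDENT_OUTPUT);
    return mapper.writeValueAsString(state);
  }
}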

[04/49] hadoop git commit: HDFS-10170. DiskBalancer: Force rebase diskbalancer branch. Contributed by Anu Engineer

2016-06-23 Thread arp
HDFS-10170. DiskBalancer: Force rebase diskbalancer branch. Contributed by Anu 
Engineer


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ec601673
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ec601673
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ec601673

Branch: refs/heads/HDFS-1312
Commit: ec601673a84832a47e6de0b0e34d0804cfed279d
Parents: 75a711a
Author: Anu Engineer 
Authored: Tue Mar 15 11:45:24 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../diskbalancer/DiskBalancerException.java | 32 +---
 1 file changed, 14 insertions(+), 18 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ec601673/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
index 38455a7..c3571c9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
@@ -1,19 +1,18 @@
 /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
  * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 package org.apache.hadoop.hdfs.server.diskbalancer;
 
@@ -23,7 +22,6 @@ import java.io.IOException;
  * Disk Balancer Exceptions.
  */
 public class DiskBalancerException extends IOException {
-  /** Possible results from DiskBalancer. **/
   public enum Result {
 DISK_BALANCER_NOT_ENABLED,
 INVALID_PLAN_VERSION,
@@ -35,9 +33,7 @@ public class DiskBalancerException extends IOException {
 PLAN_ALREADY_IN_PROGRESS,
 INVALID_VOLUME,
 INVALID_MOVE,
-INTERNAL_ERROR,
-NO_SUCH_PLAN,
-UNKNOWN_KEY
+INTERNAL_ERROR
   }
 
   private final Result result;


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[38/49] hadoop git commit: HDFS-10478. DiskBalancer: resolve volume path names. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10478. DiskBalancer: resolve volume path names. Contributed by Anu 
Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/64ccb232
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/64ccb232
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/64ccb232

Branch: refs/heads/HDFS-1312
Commit: 64ccb232ccf204991a28fa0211917fa935ad30c5
Parents: 47dcb0f
Author: Anu Engineer 
Authored: Tue Jun 7 10:29:35 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../diskbalancer/command/PlanCommand.java   | 37 +++
 .../connectors/DBNameNodeConnector.java |  1 -
 .../hdfs/server/diskbalancer/TestPlanner.java   | 47 
 3 files changed, 84 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/64ccb232/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index d346c84..7cf0df1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -23,6 +23,10 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolume;
+import org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerVolumeSet;
 import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.hdfs.server.diskbalancer.datamodel
 .DiskBalancerDataNode;
@@ -32,7 +36,9 @@ import org.codehaus.jackson.map.ObjectMapper;
 
 import java.io.IOException;
 import java.nio.charset.StandardCharsets;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 
 /**
  * Class that implements Plan Command.
@@ -111,7 +117,10 @@ public class PlanCommand extends Command {
   cmd.getOptionValue(DiskBalancer.PLAN));
 }
 this.thresholdPercentage = getThresholdPercentage(cmd);
+
+LOG.debug("threshold Percentage is {}", this.thresholdPercentage);
 setNodesToProcess(node);
+populatePathNames(node);
 
 List<NodePlan> plans = getCluster().computePlan(this.thresholdPercentage);
 setPlanParams(plans);
@@ -137,6 +146,32 @@ public class PlanCommand extends Command {
 }
   }
 
+
+  /**
+   * Reads the Physical path of the disks we are balancing. This is needed to
+   * make the disk balancer human friendly and not used in balancing.
+   *
+   * @param node - Disk Balancer Node.
+   */
+  private void populatePathNames(DiskBalancerDataNode node) throws IOException {
+String dnAddress = node.getDataNodeIP() + ":" + node.getDataNodePort();
+ClientDatanodeProtocol dnClient = getDataNodeProxy(dnAddress);
+String volumeNameJson = dnClient.getDiskBalancerSetting(
+DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
+ObjectMapper mapper = new ObjectMapper();
+
+@SuppressWarnings("unchecked")
+Map<String, String> volumeMap =
+mapper.readValue(volumeNameJson, HashMap.class);
+for (DiskBalancerVolumeSet set : node.getVolumeSets().values()) {
+  for (DiskBalancerVolume vol : set.getVolumes()) {
+if (volumeMap.containsKey(vol.getUuid())) {
+  vol.setPath(volumeMap.get(vol.getUuid()));
+}
+  }
+}
+  }
+
   /**
* Gets extended help for this command.
*
@@ -198,9 +233,11 @@ public class PlanCommand extends Command {
 for (NodePlan plan : plans) {
   for (Step step : plan.getVolumeSetPlans()) {
 if (this.bandwidth > 0) {
+  LOG.debug("Setting bandwidth to {}", this.bandwidth);
   step.setBandwidth(this.bandwidth);
 }
 if (this.maxError > 0) {
+  LOG.debug("Setting max error to {}", this.maxError);
   step.setMaxDiskErrors(this.maxError);
 }
   }

http://git-wip-us.apache.org/repos/asf/hadoop/blob/64ccb232/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/connectors/DBNameNodeConnector.java
[24/49] hadoop git commit: HDFS-9735. DiskBalancer : Refactor moveBlockAcrossStorage to be used by disk balancer. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-9735. DiskBalancer : Refactor moveBlockAcrossStorage to be used by disk 
balancer. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/7820737c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/7820737c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/7820737c

Branch: refs/heads/HDFS-1312
Commit: 7820737cfa178d9de1bcbb1e99b9677d70901914
Parents: 0506770
Author: Anu Engineer 
Authored: Mon Apr 11 15:58:06 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:20:24 2016 -0700

--
 .../server/datanode/fsdataset/FsDatasetSpi.java | 11 +++
 .../datanode/fsdataset/impl/FsDatasetImpl.java  | 95 +++-
 .../datanode/fsdataset/impl/FsVolumeImpl.java   | 13 +++
 .../server/datanode/SimulatedFSDataset.java |  7 ++
 .../extdataset/ExternalDatasetImpl.java |  8 ++
 .../diskbalancer/DiskBalancerTestUtil.java  | 62 ++---
 .../diskbalancer/TestDiskBalancerRPC.java   | 53 +--
 7 files changed, 210 insertions(+), 39 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/7820737c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
index 277b271..eeab098 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/FsDatasetSpi.java
@@ -628,4 +628,15 @@ public interface FsDatasetSpi<V extends FsVolumeSpi> extends FSDatasetMBean {
* Confirm whether the block is deleting
*/
   boolean isDeletingBlock(String bpid, long blockId);
+
+  /**
+   * Moves a given block from one volume to another volume. This is used by disk
+   * balancer.
+   *
+   * @param block   - ExtendedBlock
+   * @param destination - Destination volume
+   * @return Old replica info
+   */
+  ReplicaInfo moveBlockAcrossVolumes(final ExtendedBlock block,
+  FsVolumeSpi destination) throws IOException;
 }

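A caller's-eye sketch of the new hook, under stated assumptions: the disk balancer picks the destination volume from its plan, while here it is simply passed in, and the returned old replica info is ignored.

import java.io.IOException;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;

class MoveBlockSketch {
  static void move(FsDatasetSpi<?> dataset, ExtendedBlock block,
      FsVolumeSpi destination) throws IOException {
    // Copies the replica to the destination volume, finalizes it there,
    // and removes the old copy; returns the old replica info.
    dataset.moveBlockAcrossVolumes(block, destination);
  }
}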
http://git-wip-us.apache.org/repos/asf/hadoop/blob/7820737c/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
index b042297..2b40538 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
@@ -947,29 +947,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   volumeRef = volumes.getNextVolume(targetStorageType, 
block.getNumBytes());
 }
 try {
-  File oldBlockFile = replicaInfo.getBlockFile();
-  File oldMetaFile = replicaInfo.getMetaFile();
-  FsVolumeImpl targetVolume = (FsVolumeImpl) volumeRef.getVolume();
-  // Copy files to temp dir first
-  File[] blockFiles = copyBlockFiles(block.getBlockId(),
-  block.getGenerationStamp(), oldMetaFile, oldBlockFile,
-  targetVolume.getTmpDir(block.getBlockPoolId()),
-  replicaInfo.isOnTransientStorage(), smallBufferSize, conf);
-
-  ReplicaInfo newReplicaInfo = new ReplicaInPipeline(
-  replicaInfo.getBlockId(), replicaInfo.getGenerationStamp(),
-  targetVolume, blockFiles[0].getParentFile(), 0);
-  newReplicaInfo.setNumBytes(blockFiles[1].length());
-  // Finalize the copied files
-  newReplicaInfo = finalizeReplica(block.getBlockPoolId(), newReplicaInfo);
-  synchronized (this) {
-// Increment numBlocks here as this block moved without knowing to BPS
-FsVolumeImpl volume = (FsVolumeImpl) newReplicaInfo.getVolume();
-volume.getBlockPoolSlice(block.getBlockPoolId()).incrNumBlocks();
-  }
-
-  removeOldReplica(replicaInfo, newReplicaInfo, oldBlockFile, oldMetaFile,
-  oldBlockFile.length(), oldMetaFile.length(), block.getBlockPoolId());
+  moveBlock(block, replicaInfo, volumeRef);
 } finally {
   if (volumeRef != null) {
 volumeRef.close();
@@ -981,6 +959,77 @@ class FsDatasetImpl implements 

[09/49] hadoop git commit: HDFS-9702. DiskBalancer: getVolumeMap implementation. (Contributed by Anu Engineer)

2016-06-23 Thread arp
HDFS-9702. DiskBalancer: getVolumeMap implementation. (Contributed by Anu 
Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/918722bd
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/918722bd
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/918722bd

Branch: refs/heads/HDFS-1312
Commit: 918722bdd202acbeda92d650ff0dcecbcd8a0697
Parents: 4b93dda
Author: Arpit Agarwal 
Authored: Wed Mar 9 09:44:22 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   | 17 +++--
 .../hdfs/server/datanode/DiskBalancer.java  | 26 
 .../diskbalancer/DiskBalancerException.java |  3 +-
 .../diskbalancer/TestDiskBalancerRPC.java   | 66 
 4 files changed, 107 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/918722bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 126deb4..00e124d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -169,6 +169,7 @@ import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 import org.apache.hadoop.hdfs.server.datanode.web.DatanodeHttpServer;
+import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
 import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -3360,8 +3361,8 @@ public class DataNode extends ReconfigurableBase
   }
 
   /**
-   * Gets a run-time configuration value from running diskbalancer instance. For
-   * example : Disk Balancer bandwidth of a running instance.
+   * Gets a runtime configuration value from diskbalancer instance. For
+   * example : DiskBalancer bandwidth.
*
* @param key - String that represents the run time key value.
* @return value of the key as a string.
@@ -3370,7 +3371,15 @@ public class DataNode extends ReconfigurableBase
   @Override
   public String getDiskBalancerSetting(String key) throws IOException {
 checkSuperuserPrivilege();
-throw new DiskBalancerException("Not Implemented",
-DiskBalancerException.Result.INTERNAL_ERROR);
+Preconditions.checkNotNull(key);
+switch (key) {
+case DiskBalancerConstants.DISKBALANCER_VOLUME_NAME:
+  return this.diskBalancer.getVolumeNames();
+default:
+  LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: " +
+  key);
+  throw new DiskBalancerException("Unknown key",
+  DiskBalancerException.Result.UNKNOWN_KEY);
+}
   }
 }

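Seen from the client side, the new setting key turns the datanode into a small key-value service: ask for the volume-name key and decode the JSON map of volume UUIDs to base paths. A hedged sketch using the same Jackson 1.x mapper the server side uses:

import java.io.IOException;
import java.util.Map;
import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerConstants;
import org.codehaus.jackson.map.ObjectMapper;

class VolumeNamesSketch {
  static Map<String, String> volumeMap(ClientDatanodeProtocol dn)
      throws IOException {
    String json = dn.getDiskBalancerSetting(
        DiskBalancerConstants.DISKBALANCER_VOLUME_NAME);
    @SuppressWarnings("unchecked")
    Map<String, String> map = new ObjectMapper().readValue(json, Map.class);
    return map;
  }
}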
http://git-wip-us.apache.org/repos/asf/hadoop/blob/918722bd/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index d5c402e..9e41d2e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.server.diskbalancer.DiskBalancerException;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
 import org.apache.hadoop.hdfs.server.diskbalancer.planner.Step;
 import org.apache.hadoop.util.Time;
+import org.codehaus.jackson.map.ObjectMapper;
 
 import java.io.IOException;
 import java.nio.charset.Charset;
@@ -221,6 +222,31 @@ public class DiskBalancer {
 }
   }
 
+  /**
+   * Returns a volume ID to Volume base path map.
+   *
+   * @return Json string of the volume map.
+   * @throws DiskBalancerException
+   */
+  public String getVolumeNames() throws DiskBalancerException {
+lock.lock();
+try {
+  checkDiskBalancerEnabled();
+  Map<String, String> pathMap = new HashMap<>();
+ 

[41/49] hadoop git commit: HDFS-10540. Diskbalancer: The CLI error message for disk balancer is not enabled is not clear. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10540. Diskbalancer: The CLI error message for disk balancer is not 
enabled is not clear. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/cb68e5b3
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/cb68e5b3
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/cb68e5b3

Branch: refs/heads/HDFS-1312
Commit: cb68e5b3bdb0079af867a9e49559827ecee03010
Parents: 3225c24
Author: Anu Engineer 
Authored: Fri Jun 17 23:25:26 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:27:00 2016 -0700

--
 .../hdfs/server/datanode/DiskBalancer.java  |  2 +
 .../server/diskbalancer/command/Command.java|  2 +-
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  | 62 
 3 files changed, 28 insertions(+), 38 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb68e5b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 5fde7c5..b31b997 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -256,6 +256,8 @@ public class DiskBalancer {
   }
   ObjectMapper mapper = new ObjectMapper();
   return mapper.writeValueAsString(pathMap);
+} catch (DiskBalancerException ex) {
+  throw ex;
 } catch (IOException e) {
   throw new DiskBalancerException("Internal error, Unable to " +
   "create JSON string.", e,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb68e5b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index d2813e7..19f9945 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -171,7 +171,7 @@ public abstract class Command extends Configured {
   diskBalancerLogs = new Path(path);
 }
 if (fs.exists(diskBalancerLogs)) {
-  LOG.error("Another Diskbalancer instance is running ? - Target " +
+  LOG.debug("Another Diskbalancer instance is running ? - Target " +
   "Directory already exists. {}", diskBalancerLogs);
   throw new IOException("Another DiskBalancer files already exist at the " 
+
   "target location. " + diskBalancerLogs.toString());

http://git-wip-us.apache.org/repos/asf/hadoop/blob/cb68e5b3/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
index d83a49c..67703c4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
@@ -36,9 +36,7 @@ import org.apache.hadoop.util.ToolRunner;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
-import java.io.IOException;
 import java.io.PrintStream;
-import java.net.URISyntaxException;
 
 /**
  * DiskBalancer is a tool that can be used to ensure that data is spread evenly
@@ -169,7 +167,7 @@ public class DiskBalancer extends Configured implements Tool {
   res = ToolRunner.run(shell, argv);
 } catch (Exception ex) {
   LOG.error(ex.toString());
-  System.exit(1);
+  res = 1;
 }
 System.exit(res);
   }
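The fix replaces the hard System.exit(1) inside the catch block with an assignment, so the method always leaves through the single exit point and a failure still yields a non-zero status. A standalone sketch of the pattern:

    // Single-exit-point main(): the catch block records failure in `res`
    // and falls through to the one System.exit call.
    public final class SingleExit {
      public static void main(String[] args) {
        int res;
        try {
          res = run(args); // stand-in for ToolRunner.run(shell, argv)
        } catch (Exception ex) {
          System.err.println(ex);
          res = 1;
        }
        System.exit(res);
      }

      private static int run(String[] args) throws Exception {
        return args.length == 0 ? 0 : Integer.parseInt(args[0]);
      }
    }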
@@ -449,51 +447,41 @@ public class DiskBalancer extends Configured implements Tool {
* @param cmd  - CommandLine
* @param opts options of command line
* @param out  the output stream used for printing
-   * @throws IOException
-   * @throws URISyntaxException
*/
   private int dispatch(CommandLine cmd, Options opts, final PrintStream out)
-  throws IOException, URISyntaxException {
+ 

[25/49] hadoop git commit: HDFS-10500. Diskbalancer: Print out information when a plan is not generated. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10500. Diskbalancer: Print out information when a plan is not generated. 
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/78a1032b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/78a1032b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/78a1032b

Branch: refs/heads/HDFS-1312
Commit: 78a1032b71af7672840da98808e2bebac3cc11d1
Parents: d2ff793
Author: Anu Engineer 
Authored: Thu Jun 9 13:43:19 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../diskbalancer/command/CancelCommand.java |  4 +-
 .../server/diskbalancer/command/Command.java| 19 +-
 .../diskbalancer/command/ExecuteCommand.java|  2 +-
 .../diskbalancer/command/PlanCommand.java   | 61 ++--
 4 files changed, 37 insertions(+), 49 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/78a1032b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
index f395802..3834d9b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/CancelCommand.java
@@ -70,7 +70,7 @@ public class CancelCommand extends Command {
   // points us to the plan file, we can compute the hash as well as read
   // the address of the datanode from the plan file.
   String planFile = cmd.getOptionValue(DiskBalancer.CANCEL);
-  Preconditions.checkArgument(planFile == null || planFile.isEmpty(),
+  Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
   "Invalid plan file specified.");
   String planData = null;
   try (FSDataInputStream plan = open(planFile)) {
@@ -88,7 +88,7 @@ public class CancelCommand extends Command {
*/
   private void cancelPlan(String planData) throws IOException {
 Preconditions.checkNotNull(planData);
-NodePlan plan = readPlan(planData);
+NodePlan plan = NodePlan.parseJson(planData);
 String dataNodeAddress = plan.getNodeName() + ":" + plan.getPort();
 Preconditions.checkNotNull(dataNodeAddress);
 ClientDatanodeProtocol dataNode = getDataNodeProxy(dataNodeAddress);
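The one-line fix inverts the guard: Preconditions.checkArgument throws IllegalArgumentException when its expression is false, so the expression must state the valid condition; the old code rejected every real plan file and accepted a missing one. A small self-contained illustration (the path is made up):

    import com.google.common.base.Preconditions;

    // checkArgument throws when the expression is FALSE, so the expression
    // states what must hold for a valid argument.
    public final class GuardSketch {
      static void requirePlanFile(String planFile) {
        Preconditions.checkArgument(planFile != null && !planFile.isEmpty(),
            "Invalid plan file specified.");
      }

      public static void main(String[] args) {
        requirePlanFile("/system/diskbalancer/2016-Jun-23/node1.plan.json"); // passes
        requirePlanFile("");  // throws IllegalArgumentException
      }
    }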

http://git-wip-us.apache.org/repos/asf/hadoop/blob/78a1032b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index fb975a8..94a21d1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -31,16 +31,13 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
-import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ConnectorFactory;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerCluster;
 import 
org.apache.hadoop.hdfs.server.diskbalancer.datamodel.DiskBalancerDataNode;
-
+import org.apache.hadoop.hdfs.tools.DiskBalancer;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.htrace.fasterxml.jackson.databind.ObjectMapper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -59,10 +56,10 @@ import java.util.Date;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
+import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
-import java.util.List;
 
 /**
  * Common interface for command handling.
@@ -394,16 +391,4 @@ public abstract class Command extends Configured {
   protected DiskBalancerCluster getCluster() {
 return cluster;
   }
-
-  

[26/49] hadoop git commit: HDFS-9546: DiskBalancer: Add Execute command. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-9546: DiskBalancer: Add Execute command. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/1b39b283
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/1b39b283
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/1b39b283

Branch: refs/heads/HDFS-1312
Commit: 1b39b283c70854bf3b77f5ba9fbcce064bfea5c3
Parents: 75882ec
Author: Anu Engineer 
Authored: Fri May 13 10:52:58 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../server/diskbalancer/command/Command.java|  18 ++-
 .../diskbalancer/command/ExecuteCommand.java| 119 +++
 .../diskbalancer/command/PlanCommand.java   |  22 ++--
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  |  71 ++-
 4 files changed, 187 insertions(+), 43 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b39b283/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index 6522434..919d549 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -24,6 +24,7 @@ import org.apache.commons.cli.Option;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -81,8 +82,6 @@ public abstract class Command extends Configured {
   public Command(Configuration conf) {
 super(conf);
 // These arguments are valid for all commands.
-addValidCommandParameters(DiskBalancer.NAMENODEURI, "Name Node URI or " +
-"file URI for cluster");
 addValidCommandParameters(DiskBalancer.HELP, "Help for this command");
 addValidCommandParameters("arg", "");
   }
@@ -348,10 +347,25 @@ public abstract class Command extends Configured {
* @return OutputStream.
*/
   protected FSDataOutputStream create(String fileName) throws IOException {
+Preconditions.checkNotNull(fileName);
+if(fs == null) {
+  fs = FileSystem.get(getConf());
+}
 return fs.create(new Path(this.diskBalancerLogs, fileName));
   }
 
   /**
+   * Returns an InputStream to read data.
+   */
+  protected FSDataInputStream open(String fileName) throws IOException {
+Preconditions.checkNotNull(fileName);
+if(fs == null) {
+  fs = FileSystem.get(getConf());
+}
+return  fs.open(new Path(fileName));
+  }
+
+  /**
* Returns the output path where the plan and snapshot gets written.
*
* @return Path
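Both helpers bind `fs` lazily from the configured default FileSystem, which lets ExecuteCommand read a plan back with open() after PlanCommand wrote it with create(). A self-contained sketch of that round trip using the same FileSystem calls (path and payload are illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;

    // Write a plan file, then read it back, mirroring the create/open helpers.
    public final class PlanFileRoundTrip {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf); // same lazy binding as the helpers
        Path plan = new Path("/tmp/diskbalancer/node1.plan.json");
        try (FSDataOutputStream out = fs.create(plan)) {
          out.write("{\"volumeSetPlans\":[]}".getBytes(StandardCharsets.UTF_8));
        }
        byte[] buf = new byte[(int) fs.getFileStatus(plan).getLen()];
        try (FSDataInputStream in = fs.open(plan)) {
          in.readFully(buf);
        }
        System.out.println(new String(buf, StandardCharsets.UTF_8));
      }
    }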

http://git-wip-us.apache.org/repos/asf/hadoop/blob/1b39b283/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
new file mode 100644
index 000..1f7e81f
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ExecuteCommand.java
@@ -0,0 +1,119 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ */
+
+package org.apache.hadoop.hdfs.server.diskbalancer.command;
+
+import com.google.common.base.Preconditions;

[32/49] hadoop git commit: HDFS-9545: DiskBalancer: Add Plan Command. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-9545: DiskBalancer: Add Plan Command. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75882ec0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75882ec0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75882ec0

Branch: refs/heads/HDFS-1312
Commit: 75882ec0b096da862b8b373b70a091c19f281b2a
Parents: 1594b47
Author: Anu Engineer 
Authored: Mon May 9 10:17:56 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../hadoop-hdfs/src/main/bin/hdfs   |   6 +
 .../server/diskbalancer/command/Command.java| 381 +++
 .../diskbalancer/command/PlanCommand.java   | 217 +++
 .../diskbalancer/command/package-info.java  |  22 ++
 .../datamodel/DiskBalancerCluster.java  |  89 +++--
 .../datamodel/DiskBalancerDataNode.java |   2 +-
 .../datamodel/DiskBalancerVolumeSet.java|   2 +-
 .../diskbalancer/planner/GreedyPlanner.java |   4 +-
 .../diskbalancer/planner/PlannerFactory.java|   6 +-
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  | 260 +
 .../TestDiskBalancerWithMockMover.java  |   2 -
 11 files changed, 945 insertions(+), 46 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75882ec0/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
index 2a29d17..7952560 100755
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/bin/hdfs
@@ -39,6 +39,7 @@ function hadoop_usage
   hadoop_add_subcommand "debug" "run a Debug Admin to execute HDFS debug 
commands"
   hadoop_add_subcommand "dfs" "run a filesystem command on the file system"
   hadoop_add_subcommand "dfsadmin" "run a DFS admin client"
+  hadoop_add_subcommand "diskbalancer" "Distributes data evenly among disks on 
a given node"
   hadoop_add_subcommand "envvars" "display computed Hadoop environment 
variables"
   hadoop_add_subcommand "erasurecode" "run a HDFS ErasureCoding CLI"
   hadoop_add_subcommand "fetchdt" "fetch a delegation token from the NameNode"
@@ -125,6 +126,11 @@ function hdfscmd_case
   hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
   HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
 ;;
+diskbalancer)
+  HADOOP_CLASSNAME=org.apache.hadoop.hdfs.tools.DiskBalancer
+  hadoop_debug "Appending HADOOP_CLIENT_OPTS onto HADOOP_OPTS"
+  HADOOP_OPTS="${HADOOP_OPTS} ${HADOOP_CLIENT_OPTS}"
+;;
 envvars)
   echo "JAVA_HOME='${JAVA_HOME}'"
   echo "HADOOP_HDFS_HOME='${HADOOP_HDFS_HOME}'"

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75882ec0/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
new file mode 100644
index 000..6522434
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -0,0 +1,381 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *  http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.diskbalancer.command;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.Option;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import 

[35/49] hadoop git commit: HDFS-9547. DiskBalancer: Add user documentation. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-9547. DiskBalancer: Add user documentation. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/06a9799d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/06a9799d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/06a9799d

Branch: refs/heads/HDFS-1312
Commit: 06a9799d84bef013e1573d382f824b485aa0c329
Parents: 43eee50
Author: Anu Engineer 
Authored: Thu May 26 10:23:08 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../src/site/markdown/HDFSCommands.md   |  22 
 .../src/site/markdown/HDFSDiskbalancer.md   | 117 +++
 2 files changed, 139 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/06a9799d/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index 5bb1a87..f868118 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -37,6 +37,7 @@ HDFS Commands Guide
 * [crypto](#crypto)
 * [datanode](#datanode)
 * [dfsadmin](#dfsadmin)
+* [diskbalancer](#diskbalancer)
 * [erasurecode](#erasurecode)
 * [haadmin](#haadmin)
 * [journalnode](#journalnode)
@@ -430,6 +431,27 @@ Usage:
 
 Runs a HDFS dfsadmin client.
 
+### `diskbalancer`
+
+Usage:
+
+   hdfs diskbalancer
+ [-plan <datanode> -uri <namenodeURI>]
+ [-execute <planfile>]
+ [-query <datanode>]
+ [-cancel <planfile>]
+ [-cancel <planID> -node <datanode>]
+
+| COMMAND\_OPTION | Description |
+|: |: |
+|-plan| Creates a diskbalancer plan|
+|-execute| Executes a given plan on a datanode|
+|-query| Gets the current diskbalancer status from a datanode|
+|-cancel| Cancels a running plan|
+
+
+Runs the diskbalancer CLI. See [HDFS Diskbalancer](./HDFSDiskbalancer.html) 
for more information on this command.
+
 ### `erasurecode`
 
 Usage:

http://git-wip-us.apache.org/repos/asf/hadoop/blob/06a9799d/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
new file mode 100644
index 000..388a4c6
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -0,0 +1,117 @@
+
+
+HDFS Disk Balancer
+===
+
+* [Overview](#Overview)
+* [Architecture](#Architecture)
+* [Commands](#Commands)
+* [Settings](#Settings)
+
+
+Overview
+
+
+Diskbalancer is a command line tool that distributes data evenly on all disks
+of a datanode. This tool is different from [Balancer](./HdfsUserGuide.html#Balancer),
+which takes care of cluster-wide data balancing. Data can have an uneven spread
+between disks on a node for several reasons, such as a large amount of writes
+and deletes or a disk replacement. This tool operates against a given datanode
+and moves blocks from one disk to another.
+
+
+
+Architecture
+
+
+Disk Balancer operates by creating a plan and then executing that plan on the
+datanode. A plan is a set of statements that describe how much data should move
+between two disks. A plan is composed of multiple move steps. A move step has a
+source disk, a destination disk, and the number of bytes to move. A plan can be
+executed against an operational datanode. Disk balancer should not interfere
+with other processes since it throttles how much data is copied every second.
+Please note that disk balancer is not enabled by default on a cluster. To
+enable diskbalancer, `dfs.disk.balancer.enabled` must be set to `true` in
+hdfs-site.xml.
+
+
+Commands
+
+The following sections discuss the commands supported by disk balancer and how
+to use them.
+
+### Plan
+
+ The plan command can be run against a given datanode by running
+
+ `hdfs diskbalancer -uri hdfs://mycluster.com -plan node1.mycluster.com`
+
+ uri is the address of the namenode and -plan points to the datanode that we
+ need to plan for. By default, the plan command writes its output to
+ **/system/diskbalancer**.
+
+ The plan command also has a set of parameters that allow the user to control
+ the output and execution of the plan.
+
+| COMMAND\_OPTION| Description |
+|: |: |
+| `-out`|  Allows user to control the output location of the plan file.|
+| `-bandwidth`|Since datanode is operational and might be running 
other jobs, diskbalancer limits the amount of 

[29/49] hadoop git commit: HDFS-9461. DiskBalancer: Add Report Command. Contributed by Xiaobing Zhou.

2016-06-23 Thread arp
HDFS-9461. DiskBalancer: Add Report Command. Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b502102b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b502102b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b502102b

Branch: refs/heads/HDFS-1312
Commit: b502102bb1a1f416f43dd1227886c57ccad70fcc
Parents: 121142c
Author: Anu Engineer 
Authored: Fri Jun 10 21:15:54 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../server/diskbalancer/command/Command.java|   67 +
 .../diskbalancer/command/ReportCommand.java |  197 +
 .../datamodel/DiskBalancerVolume.java   |   30 +
 .../apache/hadoop/hdfs/tools/DiskBalancer.java  |   61 +-
 .../command/TestDiskBalancerCommand.java|  299 +
 .../diskBalancer/data-cluster-64node-3disk.json | 9484 ++
 6 files changed, 10136 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b502102b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index 94a21d1..bbf91ca 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -21,6 +21,8 @@ package org.apache.hadoop.hdfs.server.diskbalancer.command;
 import com.google.common.base.Preconditions;
 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.Option;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.lang.text.StrBuilder;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -70,6 +72,7 @@ public abstract class Command extends Configured {
   private URI clusterURI;
   private FileSystem fs = null;
   private DiskBalancerCluster cluster = null;
+  private int topNodes;
 
   private static final Path DEFAULT_LOG_DIR = new Path("/system/diskbalancer");
 
@@ -83,6 +86,7 @@ public abstract class Command extends Configured {
 // These arguments are valid for all commands.
 addValidCommandParameters(DiskBalancer.HELP, "Help for this command");
 addValidCommandParameters("arg", "");
+topNodes = 0;
   }
 
   /**
@@ -391,4 +395,67 @@ public abstract class Command extends Configured {
   protected DiskBalancerCluster getCluster() {
 return cluster;
   }
+
+  /**
+   * returns default top number of nodes.
+   * @return default top number of nodes.
+   */
+  protected int getDefaultTop() {
+return DiskBalancer.DEFAULT_TOP;
+  }
+
+  /**
+   * Put output line to log and string buffer.
+   * */
+  protected void recordOutput(final StrBuilder result,
+  final String outputLine) {
+LOG.info(outputLine);
+result.appendln(outputLine);
+  }
+
+  /**
+   * Parse top number of nodes to be processed.
+   * @return top number of nodes to be processed.
+   */
+  protected int parseTopNodes(final CommandLine cmd, final StrBuilder result) {
+String outputLine = "";
+int nodes = 0;
+final String topVal = cmd.getOptionValue(DiskBalancer.TOP);
+if (StringUtils.isBlank(topVal)) {
+  outputLine = String.format(
+  "No top limit specified, using default top value %d.",
+  getDefaultTop());
+  LOG.info(outputLine);
+  result.appendln(outputLine);
+  nodes = getDefaultTop();
+} else {
+  try {
+nodes = Integer.parseInt(topVal);
+  } catch (NumberFormatException nfe) {
+outputLine = String.format(
+"Top limit input is not numeric, using default top value %d.",
+getDefaultTop());
+LOG.info(outputLine);
+result.appendln(outputLine);
+nodes = getDefaultTop();
+  }
+}
+
+return Math.min(nodes, cluster.getNodes().size());
+  }
+
+  /**
+   * Set top number of nodes to be processed.
+   * */
+  public void setTopNodes(int topNodes) {
+this.topNodes = topNodes;
+  }
+
+  /**
+   * Get top number of nodes to be processed.
+   * @return top number of nodes to be processed.
+   * */
+  public int getTopNodes() {
+return topNodes;
+  }
 }
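The new ReportCommand in this commit is the consumer of these helpers: it reads `-top`, falls back to a default when the value is missing or non-numeric, and clamps the result to the cluster size. A standalone distillation of that logic (the default of 100 is an assumption for the sketch; the real default lives in the DiskBalancer tool):

    import org.apache.commons.lang.StringUtils;

    // Distillation of the -top parsing and clamping shown above.
    public final class TopNodesSketch {
      static final int DEFAULT_TOP = 100; // assumed default for this sketch

      static int parseTop(String topVal, int clusterSize) {
        int nodes;
        if (StringUtils.isBlank(topVal)) {
          nodes = DEFAULT_TOP;            // no -top argument given
        } else {
          try {
            nodes = Integer.parseInt(topVal);
          } catch (NumberFormatException nfe) {
            nodes = DEFAULT_TOP;          // non-numeric -top falls back
          }
        }
        return Math.min(nodes, clusterSize); // never more nodes than exist
      }

      public static void main(String[] args) {
        System.out.println(parseTop("5", 64));   // 5
        System.out.println(parseTop("abc", 64)); // 64 (default 100, clamped)
        System.out.println(parseTop(null, 3));   // 3
      }
    }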

http://git-wip-us.apache.org/repos/asf/hadoop/blob/b502102b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/ReportCommand.java

[43/49] hadoop git commit: HDFS-10547. DiskBalancer: fix whitespace issue in doc files. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10547. DiskBalancer: fix whitespace issue in doc files. Contributed by Anu 
Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/c6ed5480
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/c6ed5480
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/c6ed5480

Branch: refs/heads/HDFS-1312
Commit: c6ed54808d086fffbc1cdf25f602c78798de789a
Parents: 0774412
Author: Anu Engineer 
Authored: Mon Jun 20 09:48:34 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:27:01 2016 -0700

--
 .../src/site/markdown/HDFSCommands.md   |  2 +-
 .../src/site/markdown/HDFSDiskbalancer.md   | 25 
 2 files changed, 16 insertions(+), 11 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6ed5480/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
index f868118..39e8991 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSCommands.md
@@ -436,7 +436,7 @@ Runs a HDFS dfsadmin client.
 Usage:
 
    hdfs diskbalancer
- [-plan <datanode> -uri <namenodeURI>]
+ [-plan <datanode> -fs <namenodeURI>]
  [-execute <planfile>]
  [-query <datanode>]
  [-cancel <planfile>]

http://git-wip-us.apache.org/repos/asf/hadoop/blob/c6ed5480/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md 
b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
index 388a4c6..522dc5a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/site/markdown/HDFSDiskbalancer.md
@@ -64,11 +64,11 @@ The following sections discuss the commands supported by disk balancer
 
 | COMMAND\_OPTION| Description |
 |: |: |
-| `-out`|  Allows user to control the output location of the plan file.|
-| `-bandwidth`|    Since datanode is operational and might be running other jobs, diskbalancer limits the amount of data moved per second. This parameter allows user to set the maximum bandwidth to be used. This is not required to be set since diskBalancer will use the default bandwidth if this is not specified.|
-| `-thresholdPercentage`|  Since we operate against a snapshot of datanode, the move operations have a tolerance percentage to declare success. If user specifies 10% and move operation is say 20GB in size, if we can move 18GB that operation is considered successful. This is to accommodate the changes in datanode in real time. This parameter is not needed and a default is used if not specified.|
-| `-maxerror` |    Max error allows users to specify how many block copy operations must fail before we abort a move step. Once again, this is not a needed parameter and a system-default is used if not specified.|
-| `-v`|    Verbose mode, specifying this parameter forces the plan command to print out a summary of the plan on stdout.|
+| `-out`| Allows user to control the output location of the plan file.|
+| `-bandwidth`| Since datanode is operational and might be running other jobs, diskbalancer limits the amount of data moved per second. This parameter allows user to set the maximum bandwidth to be used. This is not required to be set since diskBalancer will use the default bandwidth if this is not specified.|
+| `-thresholdPercentage`| Since we operate against a snapshot of datanode, the move operations have a tolerance percentage to declare success. If user specifies 10% and move operation is say 20GB in size, if we can move 18GB that operation is considered successful. This is to accommodate the changes in datanode in real time. This parameter is not needed and a default is used if not specified.|
+| `-maxerror` | Max error allows users to specify how many block copy operations must fail before we abort a move step. Once again, this is not a needed parameter and a system-default is used if not specified.|
+| `-v`| Verbose mode, specifying this parameter forces the plan command to print out a summary of the plan on stdout.|
 
 The plan command writes two output files. They are `<nodename>.before.json` which
 captures the state of the datanode before the diskbalancer is run, and
 `<nodename>.plan.json`.
@@ -89,7 +89,7 @@ Query command gets the current status of the diskbalancer 
from a datanode.
 
 | COMMAND\_OPTION | Description |
 |: |: |
-|`-v` |Verbose mode, Prints out 

[49/49] hadoop git commit: HDFS-10571. TestDiskBalancerCommand#testPlanNode failed with IllegalArgumentException. Contributed by Xiaobing Zhou.

2016-06-23 Thread arp
HDFS-10571. TestDiskBalancerCommand#testPlanNode failed with 
IllegalArgumentException. Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/b2584bee
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/b2584bee
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/b2584bee

Branch: refs/heads/HDFS-1312
Commit: b2584bee457192ea5789667c1317236f47fa6060
Parents: 8a6e354
Author: Anu Engineer 
Authored: Thu Jun 23 14:48:40 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:27:02 2016 -0700

--
 .../command/TestDiskBalancerCommand.java   | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/b2584bee/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index b0821e2..e55c418 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -280,7 +280,7 @@ public class TestDiskBalancerCommand {
 final String cmdLine = String
 .format(
 "hdfs diskbalancer %s", planArg);
-runCommand(cmdLine);
+runCommand(cmdLine, cluster);
   }
 
   /* Test that illegal arguments are handled correctly*/
@@ -335,12 +335,12 @@ public class TestDiskBalancerCommand {
 runCommand(cmdLine);
   }
 
-  private List<String> runCommand(final String cmdLine) throws Exception {
+  private List<String> runCommandInternal(final String cmdLine) throws
+      Exception {
 String[] cmds = StringUtils.split(cmdLine, ' ');
 org.apache.hadoop.hdfs.tools.DiskBalancer db =
 new org.apache.hadoop.hdfs.tools.DiskBalancer(conf);
 
-FileSystem.setDefaultUri(conf, clusterJson);
 ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
 PrintStream out = new PrintStream(bufOut);
 db.run(cmds, out);
@@ -353,6 +353,17 @@ public class TestDiskBalancerCommand {
 return outputs;
   }
 
+  private List<String> runCommand(final String cmdLine) throws Exception {
+FileSystem.setDefaultUri(conf, clusterJson);
+return runCommandInternal(cmdLine);
+  }
+
+  private List<String> runCommand(final String cmdLine,
+      MiniDFSCluster miniCluster) throws Exception {
+FileSystem.setDefaultUri(conf, miniCluster.getURI());
+return runCommandInternal(cmdLine);
+  }
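The split fixes the failure named in the JIRA title: the old runCommand always pointed fs.defaultFS at the JSON cluster file, so plan runs against a live MiniDFSCluster failed. A standalone sketch of the key idea, with placeholder URIs:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    // fs.defaultFS must match the backend under test before the CLI runs.
    public final class DefaultUriSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        FileSystem.setDefaultUri(conf, URI.create("file:///tmp/cluster.json"));
        System.out.println(FileSystem.getDefaultUri(conf)); // offline json connector
        FileSystem.setDefaultUri(conf, URI.create("hdfs://127.0.0.1:8020"));
        System.out.println(FileSystem.getDefaultUri(conf)); // live MiniDFSCluster
      }
    }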
+
   /**
* Making sure that we can query the node without having done a submit.
* @throws Exception





[08/49] hadoop git commit: HDFS-9469. DiskBalancer: Add Planner. (Contributed by Anu Engineer)

2016-06-23 Thread arp
HDFS-9469. DiskBalancer: Add Planner. (Contributed by Anu Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5724a103
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5724a103
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5724a103

Branch: refs/heads/HDFS-1312
Commit: 5724a103161424f4b293ba937f0d0540179f36ac
Parents: e325c6a
Author: Arpit Agarwal 
Authored: Thu Jan 7 14:45:56 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt   |   4 +-
 .../datamodel/DiskBalancerCluster.java  | 114 -
 .../diskbalancer/planner/GreedyPlanner.java | 259 +++
 .../server/diskbalancer/planner/MoveStep.java   | 181 
 .../server/diskbalancer/planner/NodePlan.java   | 190 
 .../server/diskbalancer/planner/Planner.java|  28 ++
 .../diskbalancer/planner/PlannerFactory.java|  59 +++
 .../hdfs/server/diskbalancer/planner/Step.java  |  68 +++
 .../diskbalancer/planner/package-info.java  |  46 ++
 .../diskbalancer/DiskBalancerTestUtil.java  |   6 +-
 .../hdfs/server/diskbalancer/TestPlanner.java   | 462 +++
 .../diskBalancer/data-cluster-3node-3disk.json  | 380 +++
 12 files changed, 1792 insertions(+), 5 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5724a103/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
index 8220f88..940e1b5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
@@ -8,6 +8,8 @@ HDFS-1312 Change Log
 
 HDFS-9526. Fix jackson annotation imports. (Xiaobing Zhou via szetszwo)
 
-HDFS-9611. DiskBalancer : Replace htrace json imports with jackson.
+HDFS-9611. DiskBalancer: Replace htrace json imports with jackson.
 (Anu Engineer via Arpit Agarwal)
 
+HDFS-9469. DiskBalancer: Add Planner. (Anu Engineer via Arpit Agarwal)
+

http://git-wip-us.apache.org/repos/asf/hadoop/blob/5724a103/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
index 91f7eaa..af9e9af 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerCluster.java
@@ -22,16 +22,26 @@ import org.apache.commons.io.FileUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hdfs.server.diskbalancer.connectors.ClusterConnector;
+import org.apache.hadoop.hdfs.server.diskbalancer.planner.NodePlan;
+import org.apache.hadoop.hdfs.server.diskbalancer.planner.Planner;
+import org.apache.hadoop.hdfs.server.diskbalancer.planner.PlannerFactory;
 import org.codehaus.jackson.annotate.JsonIgnore;
 import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import java.io.File;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Paths;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Set;
 import java.util.TreeSet;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
 
 /**
  * DiskBalancerCluster represents the nodes that we are working against.
@@ -166,7 +176,7 @@ public class DiskBalancerCluster {
*/
   public void setThreshold(float thresholdPercent) {
 Preconditions.checkState((thresholdPercent >= 0.0f) &&
-(thresholdPercent <= 100.0f),  "A percentage value expected.");
+(thresholdPercent <= 100.0f), "A percentage value expected.");
 this.threshold = thresholdPercent;
   }
 
@@ -246,4 +256,106 @@ public class DiskBalancerCluster {
 File outFile = new File(getOutput() + "/" + snapShotName);
 FileUtils.writeStringToFile(outFile, json);
   }
+
+  /**
+   * Creates an output directory for the cluster output.
+   *
+   * @throws IOException - On failure to create a new directory
+   */
+  
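The message is truncated here, but the new java.util.concurrent imports above suggest the planner fans out one plan computation per datanode. An assumed-shape sketch of that fan-out (not the verbatim commit):

    import java.util.Arrays;
    import java.util.LinkedList;
    import java.util.List;
    import java.util.concurrent.Callable;
    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.Future;

    // Each datanode's plan is computed on its own worker thread.
    public final class PlanFanOutSketch {
      static List<String> computePlans(List<String> nodes)
          throws InterruptedException, ExecutionException {
        ExecutorService pool = Executors.newFixedThreadPool(4);
        try {
          List<Future<String>> futures = new LinkedList<>();
          for (final String node : nodes) {
            futures.add(pool.submit(new Callable<String>() {
              @Override
              public String call() {
                return "plan-for-" + node; // stand-in for a planner run
              }
            }));
          }
          List<String> plans = new LinkedList<>();
          for (Future<String> f : futures) {
            plans.add(f.get()); // block until each node's plan is ready
          }
          return plans;
        } finally {
          pool.shutdown();
        }
      }

      public static void main(String[] args) throws Exception {
        System.out.println(computePlans(Arrays.asList("node1", "node2")));
      }
    }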

[13/49] hadoop git commit: HDFS-9526. Fix jackson annotation imports. Contributed by Xiaobing Zhou

2016-06-23 Thread arp
HDFS-9526. Fix jackson annotation imports. Contributed by Xiaobing Zhou


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/599eca07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/599eca07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/599eca07

Branch: refs/heads/HDFS-1312
Commit: 599eca07b422d8dbf015ed01a5d5a13e4488468c
Parents: 30c6ebd
Author: Tsz-Wo Nicholas Sze 
Authored: Wed Dec 9 12:58:55 2015 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt   | 2 ++
 .../hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java  | 5 +++--
 2 files changed, 5 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/599eca07/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt 
b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
index cad8e49..952813b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt
@@ -5,3 +5,5 @@ HDFS-1312 Change Log
 HDFS-9420. Add DataModels for DiskBalancer. (Anu Engineer via szetszwo)
 
 HDFS-9449. DiskBalancer: Add connectors. (Anu Engineer via szetszwo)
+
+HDFS-9526. Fix jackson annotation imports. (Xiaobing Zhou via szetszwo)

http://git-wip-us.apache.org/repos/asf/hadoop/blob/599eca07/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
index a608248..24e891f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/datamodel/DiskBalancerVolume.java
@@ -18,8 +18,9 @@
 package org.apache.hadoop.hdfs.server.diskbalancer.datamodel;
 
 import com.google.common.base.Preconditions;
-import org.apache.htrace.fasterxml.jackson.annotation.JsonIgnore;
-import org.apache.htrace.fasterxml.jackson.annotation.JsonIgnoreProperties;
+
+import org.codehaus.jackson.annotate.JsonIgnore;
+import org.codehaus.jackson.annotate.JsonIgnoreProperties;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import java.io.IOException;
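The import swap is more than cosmetic: annotations are only honored when they come from the same Jackson lineage as the ObjectMapper doing the work, so the htrace-shaded fasterxml annotations were silently ignored by the codehaus mapper. A minimal illustration with an invented bean:

    import org.codehaus.jackson.annotate.JsonIgnore;
    import org.codehaus.jackson.annotate.JsonIgnoreProperties;
    import org.codehaus.jackson.map.ObjectMapper;
    import java.io.IOException;

    // Annotation and mapper share the codehaus package, so @JsonIgnore works.
    @JsonIgnoreProperties(ignoreUnknown = true)
    public class VolumeBeanSketch {
      public String getPath() { return "/tmp/disk/a"; }

      @JsonIgnore // excluded from serialization by the matching mapper
      public double getComputedDensity() { return 0.02; }

      public static void main(String[] args) throws IOException {
        // Prints {"path":"/tmp/disk/a"}; with a foreign-package annotation
        // the ignored getter would leak into the JSON.
        System.out.println(
            new ObjectMapper().writeValueAsString(new VolumeBeanSketch()));
      }
    }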





[10/49] hadoop git commit: HDFS-9645. DiskBalancer: Add Query RPC. (Contributed by Anu Engineer)

2016-06-23 Thread arp
HDFS-9645. DiskBalancer: Add Query RPC. (Contributed by Anu Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96fe685b
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96fe685b
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96fe685b

Branch: refs/heads/HDFS-1312
Commit: 96fe685b7a4d8db63caabae9fae4987627f08231
Parents: 0501d43
Author: Arpit Agarwal 
Authored: Wed Jan 20 10:47:30 2016 -0800
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hdfs/protocol/ClientDatanodeProtocol.java   |  6 ++
 .../ClientDatanodeProtocolTranslatorPB.java | 22 +
 .../hadoop/hdfs/server/datanode/WorkStatus.java | 85 
 .../src/main/proto/ClientDatanodeProtocol.proto | 26 ++
 .../hadoop-hdfs/HDFS-1312_CHANGES.txt   |  2 +
 ...tDatanodeProtocolServerSideTranslatorPB.java | 23 ++
 .../hadoop/hdfs/server/datanode/DataNode.java   |  5 ++
 .../diskbalancer/TestDiskBalancerRPC.java   | 48 ++-
 8 files changed, 214 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fe685b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
index 125a3c1..705c98f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java
@@ -30,6 +30,7 @@ import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
 import org.apache.hadoop.security.KerberosInfo;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
 
 /** An client-datanode protocol for block recovery
  */
@@ -177,4 +178,9 @@ public interface ClientDatanodeProtocol {
*/
   void cancelDiskBalancePlan(String planID) throws IOException;
 
+
+  /**
+   * Gets the status of an executing diskbalancer Plan.
+   */
+  WorkStatus queryDiskBalancerPlan() throws IOException;
 }
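With the RPC in place a client can poll a datanode for the progress of an executing plan. A hedged sketch of the call path; obtaining the proxy (for example via getDataNodeProxy, shown elsewhere in this series) is elided:

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
    import org.apache.hadoop.hdfs.server.datanode.WorkStatus;

    // Poll a datanode through the new queryDiskBalancerPlan RPC.
    public final class QueryPlanSketch {
      public static void poll(ClientDatanodeProtocol dataNode) throws IOException {
        WorkStatus status = dataNode.queryDiskBalancerPlan();
        System.out.println(status); // progress of the currently executing plan
      }
    }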

http://git-wip-us.apache.org/repos/asf/hadoop/blob/96fe685b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
index e037fcf..59f2fd2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java
@@ -54,7 +54,10 @@ import 
org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.Start
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto;
 import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto;
+import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.server.datanode.WorkStatus;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.ProtocolMetaInterface;
@@ -377,4 +380,23 @@ public class ClientDatanodeProtocolTranslatorPB implements
   throw ProtobufHelper.getRemoteException(e);
 }
   }
+
+  /**
+   * Gets the status of an executing diskbalancer Plan.
+   */
+  @Override
+  public WorkStatus queryDiskBalancerPlan() throws IOException {
+try {
+  QueryPlanStatusRequestProto request =
+  QueryPlanStatusRequestProto.newBuilder().build();
+  QueryPlanStatusResponseProto response =
+  rpcProxy.queryDiskBalancerPlan(NULL_CONTROLLER, request);
+  return new WorkStatus(response.hasResult() ? 

[28/49] hadoop git commit: HDFS-9461. DiskBalancer: Add Report Command. Contributed by Xiaobing Zhou.

2016-06-23 Thread arp
http://git-wip-us.apache.org/repos/asf/hadoop/blob/b502102b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/diskBalancer/data-cluster-64node-3disk.json
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/diskBalancer/data-cluster-64node-3disk.json
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/diskBalancer/data-cluster-64node-3disk.json
new file mode 100644
index 000..4293a84
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/diskBalancer/data-cluster-64node-3disk.json
@@ -0,0 +1,9484 @@
+{
+   "exclusionList":[
+
+   ],
+   "inclusionList":[
+
+   ],
+   "nodes":[
+  {
+ "nodeDataDensity":1.4280236,
+ "volumeSets":{
+"DISK":{
+   "volumes":[
+  {
+ "path":"/tmp/disk/lDbBDbMk40",
+ "capacity":3,
+ "storageType":"DISK",
+ "used":2460994155387,
+ "reserved":215232314484,
+ "uuid":"4e893114-c28b-4b4a-8254-011bb2fb04d2",
+ "failed":false,
+ "volumeDataDensity":-0.033956468,
+ "skip":false,
+ "readOnly":false,
+ "transient":false
+  },
+  {
+ "path":"/tmp/disk/tseMsFTGAW",
+ "capacity":7,
+ "storageType":"DISK",
+ "used":4912132221384,
+ "reserved":1080424540820,
+ "uuid":"25a81a58-ac24-44dd-ab21-bd1a95789ebc",
+ "failed":false,
+ "volumeDataDensity":0.019966006,
+ "skip":false,
+ "readOnly":false,
+ "transient":false
+  },
+  {
+ "path":"/tmp/disk/U2L8YlldVc",
+ "capacity":6000,
+ "storageType":"DISK",
+ "used":464539264350,
+ "reserved":81147677025,
+ "uuid":"f39ad7e9-2dd5-495c-8e56-27f68afae5e7",
+ "failed":false,
+ "volumeDataDensity":-0.045543134,
+ "skip":false,
+ "readOnly":false,
+ "transient":false
+  }
+   ],
+   "storageType":"DISK",
+   "setID":"ba2f77f6-5e22-4e7b-9dd6-a9bd0a66a781",
+   "transient":false
+},
+"RAM_DISK":{
+   "volumes":[
+  {
+ "path":"/tmp/disk/JyjJn550Km",
+ "capacity":1000,
+ "storageType":"RAM_DISK",
+ "used":88789869571,
+ "reserved":3536734091,
+ "uuid":"979ad2e1-eb98-47aa-93d4-ec0ac7b6585b",
+ "failed":false,
+ "volumeDataDensity":-0.21953386,
+ "skip":false,
+ "readOnly":false,
+ "transient":true
+  },
+  {
+ "path":"/tmp/disk/lllcBYVFqp",
+ "capacity":5000,
+ "storageType":"RAM_DISK",
+ "used":112503075304,
+ "reserved":87005858853,
+ "uuid":"81fe7b93-cc01-49b7-94d9-e73112c12c0b",
+ "failed":false,
+ "volumeDataDensity":0.42851037,
+ "skip":false,
+ "readOnly":false,
+ "transient":true
+  },
+  {
+ "path":"/tmp/disk/Z7sx2FUNbz",
+ "capacity":1,
+ "storageType":"RAM_DISK",
+ "used":811474207110,
+ "reserved":64543764212,
+ "uuid":"0f83d59c-b584-4023-882f-f4a49c7c3c26",
+ "failed":false,
+ "volumeDataDensity":-0.1665448,
+ "skip":false,
+ "readOnly":false,
+ "transient":true
+  }
+   ],
+   "storageType":"RAM_DISK",
+   "setID":"87b66d63-2e4e-4842-b56c-4eba8925b547",
+   "transient":true
+},
+"SSD":{
+   "volumes":[
+  {
+ "path":"/tmp/disk/RXmrzV9NBe",
+ "capacity":4,
+ "storageType":"SSD",
+ "used":2871238941190,
+ "reserved":433979300088,
+ "uuid":"9dd322af-a681-4391-a9b1-1f4d6956543b",
+ "failed":false,
+ 

[37/49] hadoop git commit: HDFS-10476. DiskBalancer: Plan command output directory should be a sub-directory. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10476. DiskBalancer: Plan command output directory should be a 
sub-directory. Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/47dcb0f9
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/47dcb0f9
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/47dcb0f9

Branch: refs/heads/HDFS-1312
Commit: 47dcb0f95288a5e6f05480d274f1ebd8cc873ef8
Parents: 06a9799
Author: Anu Engineer 
Authored: Tue Jun 7 10:18:05 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:21:08 2016 -0700

--
 .../apache/hadoop/hdfs/server/diskbalancer/command/Command.java  | 4 ++--
 .../src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java | 4 
 2 files changed, 6 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/47dcb0f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
index feee977..fb975a8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/Command.java
@@ -163,10 +163,10 @@ public abstract class Command extends Configured {
   if (getClusterURI().getScheme().startsWith("file")) {
 diskBalancerLogs = new Path(
 System.getProperty("user.dir") + DEFAULT_LOG_DIR.toString() +
-format.format(now));
+Path.SEPARATOR + format.format(now));
   } else {
 diskBalancerLogs = new Path(DEFAULT_LOG_DIR.toString() +
-format.format(now));
+Path.SEPARATOR + format.format(now));
   }
 } else {
   diskBalancerLogs = new Path(path);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/47dcb0f9/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
index 4005652..dde2ce4 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DiskBalancer.java
@@ -216,6 +216,10 @@ public class DiskBalancer extends Configured implements Tool {
 Option help =
 new Option(HELP, true, "Help about a command or this message");
 opt.addOption(help);
+
+Option verbose = new Option(VERBOSE, "Print out the summary of the plan");
+opt.addOption(verbose);
+
   }
 
   /**





[42/49] hadoop git commit: HDFS-10541. Diskbalancer: When no actions in plan, error message says "Plan was generated more than 24 hours ago". Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10541. Diskbalancer: When no actions in plan, error message says "Plan was 
generated more than 24 hours ago". Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5b8e1c26
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5b8e1c26
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5b8e1c26

Branch: refs/heads/HDFS-1312
Commit: 5b8e1c26d702e42b606265860c5e475970876aa5
Parents: cb68e5b
Author: Anu Engineer 
Authored: Fri Jun 17 23:29:57 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:27:00 2016 -0700

--
 .../hadoop/hdfs/server/diskbalancer/command/PlanCommand.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5b8e1c26/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
index 76bdc9f..c13399b 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/command/PlanCommand.java
@@ -140,7 +140,7 @@ public class PlanCommand extends Command {
   .getBytes(StandardCharsets.UTF_8));
 }
 
-if (plan != null) {
+if (plan != null && plan.getVolumeSetPlans().size() > 0) {
   LOG.info("Writing plan to : {}", getOutputPath());
   try (FSDataOutputStream planStream = create(String.format(
   DiskBalancer.PLAN_TEMPLATE,





[40/49] hadoop git commit: HDFS-10520. DiskBalancer: Fix Checkstyle issues in test code. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-10520. DiskBalancer: Fix Checkstyle issues in test code. Contributed by 
Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/3225c24e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/3225c24e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/3225c24e

Branch: refs/heads/HDFS-1312
Commit: 3225c24e0efb8627ea84ba23ad09859942cd81f0
Parents: 7e2be5c
Author: Anu Engineer 
Authored: Wed Jun 15 15:28:22 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:27:00 2016 -0700

--
 .../diskbalancer/DiskBalancerException.java |  3 +
 .../server/datanode/SimulatedFSDataset.java |  2 +-
 .../DiskBalancerResultVerifier.java |  3 +
 .../diskbalancer/DiskBalancerTestUtil.java  |  4 +-
 .../server/diskbalancer/TestConnectors.java | 29 +
 .../server/diskbalancer/TestDataModels.java | 23 
 .../server/diskbalancer/TestDiskBalancer.java   | 62 +++-
 .../diskbalancer/TestDiskBalancerRPC.java   | 16 +++--
 .../TestDiskBalancerWithMockMover.java  | 29 +
 .../hdfs/server/diskbalancer/TestPlanner.java   | 39 +++-
 10 files changed, 120 insertions(+), 90 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/3225c24e/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
index a55bcf3..a420b04 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
@@ -22,6 +22,9 @@ import java.io.IOException;
  * Disk Balancer Exceptions.
  */
 public class DiskBalancerException extends IOException {
+  /**
+   * Results returned by the RPC layer of DiskBalancer.
+   */
   public enum Result {
 DISK_BALANCER_NOT_ENABLED,
 INVALID_PLAN_VERSION,

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3225c24e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
index 24f4a52..0565260 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java
@@ -1362,7 +1362,7 @@ public class SimulatedFSDataset implements FsDatasetSpi<FsVolumeSpi> {
 
   @Override
   public ReplicaInfo moveBlockAcrossVolumes(ExtendedBlock block,
-FsVolumeSpi destination) throws IOException {
+  FsVolumeSpi destination) throws IOException {
 return null;
   }
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3225c24e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerResultVerifier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerResultVerifier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerResultVerifier.java
index 5abb33c..22367ee 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerResultVerifier.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerResultVerifier.java
@@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs.server.diskbalancer;
 import org.hamcrest.Description;
 import org.hamcrest.TypeSafeMatcher;
 
+/**
+ * Helps in verifying test results.
+ */
 public class DiskBalancerResultVerifier
    extends TypeSafeMatcher<DiskBalancerException> {
   private final DiskBalancerException.Result expectedResult;

http://git-wip-us.apache.org/repos/asf/hadoop/blob/3225c24e/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerTestUtil.java
--
diff --git 

[01/49] hadoop git commit: HADOOP-13019. Implement ErasureCodec for HitchHiker XOR coding. Contributed by Kai Sasaki. [Forced Update!]

2016-06-23 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 509072312 -> b2584bee4 (forced update)


HADOOP-13019. Implement ErasureCodec for HitchHiker XOR coding. Contributed by 
Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b9edf6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b9edf6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b9edf6e

Branch: refs/heads/HDFS-1312
Commit: 0b9edf6e0f19e9d94f57f6dca41812ce8c1cc81f
Parents: dca298d
Author: Zhe Zhang 
Authored: Thu Jun 23 15:52:51 2016 -0700
Committer: Zhe Zhang 
Committed: Thu Jun 23 15:52:51 2016 -0700

--
 .../io/erasurecode/codec/HHXORErasureCodec.java | 45 
 .../codec/TestHHXORErasureCodec.java| 40 +
 2 files changed, 85 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b9edf6e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/HHXORErasureCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/HHXORErasureCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/HHXORErasureCodec.java
new file mode 100644
index 000..3c8061d
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/HHXORErasureCodec.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.codec;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.coder.ErasureCoder;
+import org.apache.hadoop.io.erasurecode.coder.HHXORErasureDecoder;
+import org.apache.hadoop.io.erasurecode.coder.HHXORErasureEncoder;
+
+/**
+ * A Hitchhiker-XOR erasure codec.
+ */
+@InterfaceAudience.Private
+public class HHXORErasureCodec extends AbstractErasureCodec {
+
+  public HHXORErasureCodec(ECSchema schema) {
+super(schema);
+  }
+
+  @Override
+  public ErasureCoder createEncoder() {
+return new HHXORErasureEncoder(getSchema());
+  }
+
+  @Override
+  public ErasureCoder createDecoder() {
+return new HHXORErasureDecoder(getSchema());
+  }
+}
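
For orientation, a minimal usage sketch of the codec above. The ECSchema
constructor arguments (codec name, data units, parity units) are assumptions
for illustration rather than values taken from this commit:

    // Hypothetical wiring: obtain a matched encoder/decoder pair.
    ECSchema schema = new ECSchema("hhxor", 10, 4);  // assumed ctor shape
    HHXORErasureCodec codec = new HHXORErasureCodec(schema);
    ErasureCoder encoder = codec.createEncoder();
    ErasureCoder decoder = codec.createDecoder();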

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b9edf6e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/codec/TestHHXORErasureCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/codec/TestHHXORErasureCodec.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/codec/TestHHXORErasureCodec.java
new file mode 100644
index 000..c980b87
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/codec/TestHHXORErasureCodec.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.codec;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import 

[03/49] hadoop git commit: Fix a build break in HDFS-1312

2016-06-23 Thread arp
Fix a build break in HDFS-1312


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/9be97037
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/9be97037
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/9be97037

Branch: refs/heads/HDFS-1312
Commit: 9be9703716d2787cd6ee0ebbbe44a18b1f039018
Parents: ec60167
Author: Anu Engineer 
Authored: Thu Mar 17 16:14:48 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hadoop/hdfs/server/diskbalancer/DiskBalancerException.java   | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/9be97037/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
index c3571c9..a55bcf3 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerException.java
@@ -33,7 +33,9 @@ public class DiskBalancerException extends IOException {
 PLAN_ALREADY_IN_PROGRESS,
 INVALID_VOLUME,
 INVALID_MOVE,
-INTERNAL_ERROR
+INTERNAL_ERROR,
+NO_SUCH_PLAN,
+UNKNOWN_KEY
   }
 
   private final Result result;
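
The two new codes are raised the same way as the existing ones; a short
sketch, assuming the (String, Result) constructor that DiskBalancerException
exposes elsewhere in this branch:

    // Hypothetical call site for the new NO_SUCH_PLAN code.
    throw new DiskBalancerException("No plan found with ID " + planID,
        DiskBalancerException.Result.NO_SUCH_PLAN);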


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[07/49] hadoop git commit: HDFS-9709. DiskBalancer : Add tests for disk balancer using a Mock Mover class. Contributed by Anu Engineer.

2016-06-23 Thread arp
HDFS-9709. DiskBalancer : Add tests for disk balancer using a Mock Mover class. 
Contributed by Anu Engineer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/6c606bf5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/6c606bf5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/6c606bf5

Branch: refs/heads/HDFS-1312
Commit: 6c606bf5c8c1ace381ce73679c2be96d5475ba34
Parents: 9be9703
Author: Anu Engineer 
Authored: Tue Mar 22 16:26:49 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hdfs/server/datanode/DiskBalancer.java  |  14 +-
 .../DiskBalancerResultVerifier.java |  42 ++
 .../diskbalancer/TestDiskBalancerRPC.java   |  39 +-
 .../TestDiskBalancerWithMockMover.java  | 570 +++
 4 files changed, 628 insertions(+), 37 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c606bf5/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index d1bc1f1..972f0fc 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -40,7 +40,11 @@ import java.io.IOException;
 import java.nio.charset.Charset;
 import java.util.HashMap;
 import java.util.Map;
-import java.util.concurrent.*;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.TimeUnit;
 import java.util.concurrent.locks.ReentrantLock;
 
 /**
@@ -106,6 +110,7 @@ public class DiskBalancer {
   this.isDiskBalancerEnabled = false;
   this.currentResult = Result.NO_PLAN;
   if ((this.future != null) && (!this.future.isDone())) {
+this.currentResult = Result.PLAN_CANCELLED;
 this.blockMover.setExitFlag();
 shutdownExecutor();
   }
@@ -120,9 +125,9 @@ public class DiskBalancer {
   private void shutdownExecutor() {
 scheduler.shutdown();
 try {
-  if(!scheduler.awaitTermination(30, TimeUnit.SECONDS)) {
+  if(!scheduler.awaitTermination(10, TimeUnit.SECONDS)) {
 scheduler.shutdownNow();
-if (!scheduler.awaitTermination(30, TimeUnit.SECONDS)) {
+if (!scheduler.awaitTermination(10, TimeUnit.SECONDS)) {
   LOG.error("Disk Balancer : Scheduler did not terminate.");
 }
   }
@@ -218,6 +223,7 @@ public class DiskBalancer {
   if (!this.future.isDone()) {
 this.blockMover.setExitFlag();
 shutdownExecutor();
+this.currentResult = Result.PLAN_CANCELLED;
   }
 } finally {
   lock.unlock();
@@ -537,7 +543,7 @@ public class DiskBalancer {
   /**
* Holds references to actual volumes that we will be operating against.
*/
-  static class VolumePair {
+  public static class VolumePair {
 private final FsVolumeSpi source;
 private final FsVolumeSpi dest;
 

http://git-wip-us.apache.org/repos/asf/hadoop/blob/6c606bf5/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerResultVerifier.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerResultVerifier.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerResultVerifier.java
new file mode 100644
index 000..5abb33c
--- /dev/null
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/DiskBalancerResultVerifier.java
@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * 
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * 
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either 
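
A sketch of how a TypeSafeMatcher such as this one is typically combined with
JUnit 4's ExpectedException rule in the new tests; the DataNode call at the
end is hypothetical:

    @Rule
    public ExpectedException thrown = ExpectedException.none();

    @Test
    public void testCancelNonExistentPlan() throws Exception {
      thrown.expect(DiskBalancerException.class);
      thrown.expect(new DiskBalancerResultVerifier(
          DiskBalancerException.Result.NO_SUCH_PLAN));
      dataNode.cancelDiskBalancePlan("no-such-plan");  // hypothetical call
    }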

[05/49] hadoop git commit: HDFS-9703. DiskBalancer: getBandwidth implementation. (Contributed by Anu Engineer)

2016-06-23 Thread arp
HDFS-9703. DiskBalancer: getBandwidth implementation. (Contributed by Anu 
Engineer)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/75a711a2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/75a711a2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/75a711a2

Branch: refs/heads/HDFS-1312
Commit: 75a711a2d53966361f5d5fa727b43c9fddb01504
Parents: 918722b
Author: Arpit Agarwal 
Authored: Mon Mar 14 12:57:29 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 18:18:48 2016 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |  6 +++--
 .../hdfs/server/datanode/DiskBalancer.java  | 17 +
 .../diskbalancer/TestDiskBalancerRPC.java   | 26 
 3 files changed, 37 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/75a711a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 00e124d..8a61291 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1031,7 +1031,7 @@ public class DataNode extends ReconfigurableBase
* @param  data - FSDataSet
* @param conf - Config
*/
-  private synchronized void initDiskBalancer(FsDatasetSpi data,
+  private void initDiskBalancer(FsDatasetSpi data,
   Configuration conf) {
  Configuration conf) {
 if (this.diskBalancer != null) {
   return;
@@ -1045,7 +1045,7 @@ public class DataNode extends ReconfigurableBase
   /**
* Shutdown disk balancer.
*/
-  private synchronized void shutdownDiskBalancer() {
+  private  void shutdownDiskBalancer() {
 if (this.diskBalancer != null) {
   this.diskBalancer.shutdown();
   this.diskBalancer = null;
@@ -3375,6 +3375,8 @@ public class DataNode extends ReconfigurableBase
 switch (key) {
 case DiskBalancerConstants.DISKBALANCER_VOLUME_NAME:
   return this.diskBalancer.getVolumeNames();
+case DiskBalancerConstants.DISKBALANCER_BANDWIDTH :
+  return Long.toString(this.diskBalancer.getBandwidth());
 default:
   LOG.error("Disk Balancer - Unknown key in get balancer setting. Key: " +
   key);

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75a711a2/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
index 9e41d2e..d1bc1f1 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DiskBalancer.java
@@ -73,6 +73,7 @@ public class DiskBalancer {
   private Future future;
   private String planID;
   private DiskBalancerWorkStatus.Result currentResult;
+  private long bandwidth;
 
   /**
* Constructs a Disk Balancer object. This object takes care of reading a
@@ -159,6 +160,7 @@ public class DiskBalancer {
   createWorkPlan(nodePlan);
   this.planID = planID;
   this.currentResult = Result.PLAN_UNDER_PROGRESS;
+  this.bandwidth = bandwidth;
   executePlan();
 } finally {
   lock.unlock();
@@ -248,6 +250,21 @@ public class DiskBalancer {
 }
   }
 
+  /**
+   * Returns the current bandwidth.
+   *
+   * @return the current bandwidth as a long.
+   * @throws DiskBalancerException if disk balancer is not enabled.
+   */
+  public long getBandwidth() throws DiskBalancerException {
+lock.lock();
+try {
+  checkDiskBalancerEnabled();
+  return this.bandwidth;
+} finally {
+  lock.unlock();
+}
+  }
 
   /**
* Throws if Disk balancer is disabled.

http://git-wip-us.apache.org/repos/asf/hadoop/blob/75a711a2/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java
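
Functionally, the new switch arm lets a client read the bandwidth back
through the generic settings call; a sketch, where the proxy variable and its
exact type are assumptions:

    // Hypothetical client-side read of the bandwidth setting.
    String raw = datanodeProxy.getDiskBalancerSetting(
        DiskBalancerConstants.DISKBALANCER_BANDWIDTH);
    long bandwidth = Long.parseLong(raw);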
 

hadoop git commit: HADOOP-13019. Implement ErasureCodec for HitchHiker XOR coding. Contributed by Kai Sasaki.

2016-06-23 Thread zhz
Repository: hadoop
Updated Branches:
  refs/heads/trunk dca298d79 -> 0b9edf6e0


HADOOP-13019. Implement ErasureCodec for HitchHiker XOR coding. Contributed by 
Kai Sasaki.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/0b9edf6e
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/0b9edf6e
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/0b9edf6e

Branch: refs/heads/trunk
Commit: 0b9edf6e0f19e9d94f57f6dca41812ce8c1cc81f
Parents: dca298d
Author: Zhe Zhang 
Authored: Thu Jun 23 15:52:51 2016 -0700
Committer: Zhe Zhang 
Committed: Thu Jun 23 15:52:51 2016 -0700

--
 .../io/erasurecode/codec/HHXORErasureCodec.java | 45 
 .../codec/TestHHXORErasureCodec.java| 40 +
 2 files changed, 85 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b9edf6e/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/HHXORErasureCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/HHXORErasureCodec.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/HHXORErasureCodec.java
new file mode 100644
index 000..3c8061d
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/erasurecode/codec/HHXORErasureCodec.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.codec;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.coder.ErasureCoder;
+import org.apache.hadoop.io.erasurecode.coder.HHXORErasureDecoder;
+import org.apache.hadoop.io.erasurecode.coder.HHXORErasureEncoder;
+
+/**
+ * A Hitchhiker-XOR erasure codec.
+ */
+@InterfaceAudience.Private
+public class HHXORErasureCodec extends AbstractErasureCodec {
+
+  public HHXORErasureCodec(ECSchema schema) {
+super(schema);
+  }
+
+  @Override
+  public ErasureCoder createEncoder() {
+return new HHXORErasureEncoder(getSchema());
+  }
+
+  @Override
+  public ErasureCoder createDecoder() {
+return new HHXORErasureDecoder(getSchema());
+  }
+}

http://git-wip-us.apache.org/repos/asf/hadoop/blob/0b9edf6e/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/codec/TestHHXORErasureCodec.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/codec/TestHHXORErasureCodec.java
 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/codec/TestHHXORErasureCodec.java
new file mode 100644
index 000..c980b87
--- /dev/null
+++ 
b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/erasurecode/codec/TestHHXORErasureCodec.java
@@ -0,0 +1,40 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.erasurecode.codec;
+
+import org.apache.hadoop.io.erasurecode.ECSchema;
+import org.apache.hadoop.io.erasurecode.coder.ErasureCoder;

[3/3] hadoop git commit: Merge branch 'trunk' into HDFS-1312

2016-06-23 Thread aengineer
Merge branch 'trunk' into HDFS-1312


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/50907231
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/50907231
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/50907231

Branch: refs/heads/HDFS-1312
Commit: 509072312eb924ee1603a8ffb32b0573ffd35e0d
Parents: 67fec0c dca298d
Author: Anu Engineer 
Authored: Thu Jun 23 14:48:47 2016 -0700
Committer: Anu Engineer 
Committed: Thu Jun 23 14:48:47 2016 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |  5 +++-
 .../hdfs/server/datanode/DataXceiverServer.java |  4 +++
 .../datanode/metrics/DataNodeMetrics.java   | 16 ++
 .../server/datanode/TestDataNodeMetrics.java| 31 
 4 files changed, 55 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/50907231/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[2/3] hadoop git commit: HDFS-10571. TestDiskBalancerCommand#testPlanNode failed with IllegalArgumentException. Contributed by Xiaobing Zhou.

2016-06-23 Thread aengineer
HDFS-10571. TestDiskBalancerCommand#testPlanNode failed with 
IllegalArgumentException. Contributed by Xiaobing Zhou.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/67fec0c4
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/67fec0c4
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/67fec0c4

Branch: refs/heads/HDFS-1312
Commit: 67fec0c4363a480eaaabcb1b5fbe0cbbabc19a86
Parents: ea8bfc3
Author: Anu Engineer 
Authored: Thu Jun 23 14:48:40 2016 -0700
Committer: Anu Engineer 
Committed: Thu Jun 23 14:48:40 2016 -0700

--
 .../command/TestDiskBalancerCommand.java   | 17 ++---
 1 file changed, 14 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/67fec0c4/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
index b0821e2..e55c418 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/command/TestDiskBalancerCommand.java
@@ -280,7 +280,7 @@ public class TestDiskBalancerCommand {
 final String cmdLine = String
 .format(
 "hdfs diskbalancer %s", planArg);
-runCommand(cmdLine);
+runCommand(cmdLine, cluster);
   }
 
   /* Test that illegal arguments are handled correctly*/
@@ -335,12 +335,12 @@ public class TestDiskBalancerCommand {
 runCommand(cmdLine);
   }
 
-  private List<String> runCommand(final String cmdLine) throws Exception {
+  private List<String> runCommandInternal(final String cmdLine) throws
+      Exception {
 String[] cmds = StringUtils.split(cmdLine, ' ');
 org.apache.hadoop.hdfs.tools.DiskBalancer db =
 new org.apache.hadoop.hdfs.tools.DiskBalancer(conf);
 
-FileSystem.setDefaultUri(conf, clusterJson);
 ByteArrayOutputStream bufOut = new ByteArrayOutputStream();
 PrintStream out = new PrintStream(bufOut);
 db.run(cmds, out);
@@ -353,6 +353,17 @@ public class TestDiskBalancerCommand {
 return outputs;
   }
 
+  private List<String> runCommand(final String cmdLine) throws Exception {
+FileSystem.setDefaultUri(conf, clusterJson);
+return runCommandInternal(cmdLine);
+  }
+
+  private List<String> runCommand(final String cmdLine,
+      MiniDFSCluster miniCluster) throws Exception {
+FileSystem.setDefaultUri(conf, miniCluster.getURI());
+return runCommandInternal(cmdLine);
+  }
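
The refactor separates choosing the default filesystem URI from executing the
command, so a test can target either the canned cluster.json topology or a
live MiniDFSCluster. A hypothetical call against the latter (nodeName is an
assumed variable):

    List<String> outputs = runCommand(
        String.format("hdfs diskbalancer -plan %s", nodeName), cluster);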
+
   /**
* Making sure that we can query the node without having done a submit.
* @throws Exception


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[1/3] hadoop git commit: HDFS-10469. Add number of active xceivers to datanode metrics. Contributed by Hanisha Koneru.

2016-06-23 Thread aengineer
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 ea8bfc370 -> 509072312


HDFS-10469. Add number of active xceivers to datanode metrics. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dca298d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dca298d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dca298d7

Branch: refs/heads/HDFS-1312
Commit: dca298d79e46e27bdf008be53dd77448d7a9c0c6
Parents: e98c0c7
Author: Xiaoyu Yao 
Authored: Thu Jun 23 14:13:31 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Jun 23 14:13:39 2016 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |  5 +++-
 .../hdfs/server/datanode/DataXceiverServer.java |  4 +++
 .../datanode/metrics/DataNodeMetrics.java   | 16 ++
 .../server/datanode/TestDataNodeMetrics.java| 31 
 4 files changed, 55 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca298d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 410472e..a59a59f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1928,7 +1928,10 @@ public class DataNode extends ReconfigurableBase
   } catch (InterruptedException ie) {
   }
 }
-   
+if (metrics != null) {
+  metrics.setDataNodeActiveXceiversCount(0);
+}
+
// IPC server needs to be shutdown late in the process, otherwise
// shutdown command response won't get sent.
if (ipcServer != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca298d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index 126d5b1..4aab3f8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -232,11 +232,13 @@ class DataXceiverServer implements Runnable {
 }
 peers.put(peer, t);
 peersXceiver.put(peer, xceiver);
+datanode.metrics.incrDataNodeActiveXceiversCount();
   }
 
   synchronized void closePeer(Peer peer) {
 peers.remove(peer);
 peersXceiver.remove(peer);
+datanode.metrics.decrDataNodeActiveXceiversCount();
 IOUtils.cleanup(null, peer);
   }
 
@@ -282,6 +284,7 @@ class DataXceiverServer implements Runnable {
 }
 peers.clear();
 peersXceiver.clear();
+datanode.metrics.setDataNodeActiveXceiversCount(0);
   }
 
   // Return the number of peers.
@@ -303,6 +306,7 @@ class DataXceiverServer implements Runnable {
   synchronized void releasePeer(Peer peer) {
 peers.remove(peer);
 peersXceiver.remove(peer);
+datanode.metrics.decrDataNodeActiveXceiversCount();
   }
 
   public void updateBalancerMaxConcurrentMovers(int movers) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca298d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index 3d504d6..dc12787 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 
 import 
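
The DataNodeMetrics hunk is truncated here, but the incr/decr/set calls above
imply a simple int gauge; a sketch of its likely shape (reconstructed from
those call sites, so treat it as an assumption):

    @Metric("Count of active xceivers")
    private MutableGaugeInt dataNodeActiveXceiversCount;

    public void incrDataNodeActiveXceiversCount() {
      dataNodeActiveXceiversCount.incr();
    }

    public void decrDataNodeActiveXceiversCount() {
      dataNodeActiveXceiversCount.decr();
    }

    public void setDataNodeActiveXceiversCount(int value) {
      dataNodeActiveXceiversCount.set(value);
    }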

hadoop git commit: HDFS-10469. Add number of active xceivers to datanode metrics. Contributed by Hanisha Koneru.

2016-06-23 Thread xyao
Repository: hadoop
Updated Branches:
  refs/heads/trunk e98c0c7a1 -> dca298d79


HDFS-10469. Add number of active xceivers to datanode metrics. Contributed by 
Hanisha Koneru.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/dca298d7
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/dca298d7
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/dca298d7

Branch: refs/heads/trunk
Commit: dca298d79e46e27bdf008be53dd77448d7a9c0c6
Parents: e98c0c7
Author: Xiaoyu Yao 
Authored: Thu Jun 23 14:13:31 2016 -0700
Committer: Xiaoyu Yao 
Committed: Thu Jun 23 14:13:39 2016 -0700

--
 .../hadoop/hdfs/server/datanode/DataNode.java   |  5 +++-
 .../hdfs/server/datanode/DataXceiverServer.java |  4 +++
 .../datanode/metrics/DataNodeMetrics.java   | 16 ++
 .../server/datanode/TestDataNodeMetrics.java| 31 
 4 files changed, 55 insertions(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca298d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index 410472e..a59a59f 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -1928,7 +1928,10 @@ public class DataNode extends ReconfigurableBase
   } catch (InterruptedException ie) {
   }
 }
-   
+if (metrics != null) {
+  metrics.setDataNodeActiveXceiversCount(0);
+}
+
// IPC server needs to be shutdown late in the process, otherwise
// shutdown command response won't get sent.
if (ipcServer != null) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca298d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index 126d5b1..4aab3f8 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -232,11 +232,13 @@ class DataXceiverServer implements Runnable {
 }
 peers.put(peer, t);
 peersXceiver.put(peer, xceiver);
+datanode.metrics.incrDataNodeActiveXceiversCount();
   }
 
   synchronized void closePeer(Peer peer) {
 peers.remove(peer);
 peersXceiver.remove(peer);
+datanode.metrics.decrDataNodeActiveXceiversCount();
 IOUtils.cleanup(null, peer);
   }
 
@@ -282,6 +284,7 @@ class DataXceiverServer implements Runnable {
 }
 peers.clear();
 peersXceiver.clear();
+datanode.metrics.setDataNodeActiveXceiversCount(0);
   }
 
   // Return the number of peers.
@@ -303,6 +306,7 @@ class DataXceiverServer implements Runnable {
   synchronized void releasePeer(Peer peer) {
 peers.remove(peer);
 peersXceiver.remove(peer);
+datanode.metrics.decrDataNodeActiveXceiversCount();
   }
 
   public void updateBalancerMaxConcurrentMovers(int movers) {

http://git-wip-us.apache.org/repos/asf/hadoop/blob/dca298d7/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
index 3d504d6..dc12787 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
@@ -30,6 +30,7 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableQuantiles;
 import org.apache.hadoop.metrics2.lib.MutableRate;
+import org.apache.hadoop.metrics2.lib.MutableGaugeInt;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
 
 import 

[09/11] hadoop git commit: Revert "HADOOP-13228. Add delegation token to the connection in DelegationTokenAuthenticator. Contributed by Xiao Chen."

2016-06-23 Thread arp
Revert "HADOOP-13228. Add delegation token to the connection in 
DelegationTokenAuthenticator. Contributed by Xiao Chen."

This reverts commit 35356de1ba1cad0fa469ff546263290109c61b77.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e98c0c7a
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e98c0c7a
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e98c0c7a

Branch: refs/heads/HDFS-1312
Commit: e98c0c7a1c2fe9380c90f7530a46752153cc37f2
Parents: 42d53e8
Author: Andrew Wang 
Authored: Thu Jun 23 11:08:02 2016 -0700
Committer: Andrew Wang 
Committed: Thu Jun 23 11:08:02 2016 -0700

--
 .../DelegationTokenAuthenticationHandler.java   |   7 --
 .../web/DelegationTokenAuthenticator.java   |  19 
 .../delegation/web/TestWebDelegationToken.java  | 114 +--
 3 files changed, 3 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e98c0c7a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index 95a849f..3f191de 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -51,8 +51,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
@@ -80,9 +78,6 @@ import org.slf4j.LoggerFactory;
 public abstract class DelegationTokenAuthenticationHandler
 implements AuthenticationHandler {
 
-  private static final Logger LOG =
-  LoggerFactory.getLogger(DelegationTokenAuthenticationHandler.class);
-
   protected static final String TYPE_POSTFIX = "-dt";
 
   public static final String PREFIX = "delegation-token.";
@@ -332,8 +327,6 @@ public abstract class DelegationTokenAuthenticationHandler
   throws IOException, AuthenticationException {
 AuthenticationToken token;
 String delegationParam = getDelegationToken(request);
-LOG.debug("Authenticating with delegationParam: {}, query string: {}",
-delegationParam, request.getQueryString());
 if (delegationParam != null) {
   try {
 Token dt = new Token();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e98c0c7a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index 53978a6..37ae601 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -122,24 +122,6 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
 return hasDt;
   }
 
-  /**
-   * Append the delegation token to the request header if needed.
-   */
-  private void appendDelegationToken(final AuthenticatedURL.Token token,
-  final Token<?> dToken, final HttpURLConnection conn) throws IOException {
-if (token.isSet()) {
-  LOG.debug("Auth token is set, not appending delegation token.");
-  return;
-}
-if (dToken == null) {
-  LOG.warn("Delegation token is null, cannot set on request header.");
-  return;
-}
-conn.setRequestProperty(
-DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
-dToken.encodeToUrlString());
-  }
-
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
   throws IOException, AuthenticationException {
@@ -304,7 +286,6 @@ public abstract class DelegationTokenAuthenticator implements Authenticator {
 url = new URL(sb.toString());
 

[01/11] hadoop git commit: YARN-5171. Extend DistributedSchedulerProtocol to notify RM of containers allocated by the Node. (Inigo Goiri via asuresh)

2016-06-23 Thread arp
Repository: hadoop
Updated Branches:
  refs/heads/HDFS-1312 7034dcf9b -> ea8bfc370


YARN-5171. Extend DistributedSchedulerProtocol to notify RM of containers 
allocated by the Node. (Inigo Goiri via asuresh)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/99e5dd68
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/99e5dd68
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/99e5dd68

Branch: refs/heads/HDFS-1312
Commit: 99e5dd68d0f44109c169d74824fa45a7396a5990
Parents: 79a7289
Author: Arun Suresh 
Authored: Wed Jun 22 19:04:54 2016 -0700
Committer: Arun Suresh 
Committed: Wed Jun 22 19:04:54 2016 -0700

--
 .../api/impl/TestDistributedScheduling.java | 123 +++-
 .../yarn/api/records/impl/pb/ProtoUtils.java|  14 ++
 .../api/DistributedSchedulerProtocol.java   |   4 +-
 ...istributedSchedulerProtocolPBClientImpl.java |  17 +-
 ...stributedSchedulerProtocolPBServiceImpl.java |   7 +-
 .../DistSchedAllocateRequest.java   |  69 +++
 .../impl/pb/DistSchedAllocateRequestPBImpl.java | 185 +++
 .../proto/distributed_scheduler_protocol.proto  |   2 +-
 .../yarn_server_common_service_protos.proto |   5 +
 .../amrmproxy/AbstractRequestInterceptor.java   |  12 +-
 .../amrmproxy/DefaultRequestInterceptor.java|  11 +-
 .../nodemanager/scheduler/LocalScheduler.java   |  34 ++--
 .../scheduler/TestLocalScheduler.java   |   3 +-
 .../DistributedSchedulingService.java   |  29 ++-
 .../rmcontainer/RMContainer.java|  11 ++
 .../rmcontainer/RMContainerImpl.java|  31 
 .../scheduler/AbstractYarnScheduler.java|  18 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  20 ++
 .../TestDistributedSchedulingService.java   |  24 ++-
 19 files changed, 572 insertions(+), 47 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/99e5dd68/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java
index a556aa2..c649071 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestDistributedScheduling.java
@@ -47,12 +47,15 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.api.records.Token;
 import org.apache.hadoop.yarn.client.api.AMRMClient;
+import org.apache.hadoop.yarn.client.api.AMRMClient.ContainerRequest;
 import org.apache.hadoop.yarn.client.api.YarnClient;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
 import org.apache.hadoop.yarn.server.MiniYARNCluster;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
+import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
+import 
org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.utils.BuilderUtils;
 import org.junit.After;
 import org.junit.Assert;
@@ -64,10 +67,14 @@ import org.mockito.stubbing.Answer;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
 import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.any;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
@@ -166,8 +173,18 @@ public class TestDistributedScheduling extends BaseAMRMProxyE2ETest {
 Assert.assertNotNull(responseRegister.getSchedulerResourceTypes());
 Assert.assertNotNull(responseRegister.getMaximumResourceCapability());
 
-RMApp rmApp =
-cluster.getResourceManager().getRMContext().getRMApps().get(appId);
+// Wait until the RM has been updated and verify
+Map<ApplicationId, RMApp> rmApps =
+cluster.getResourceManager().getRMContext().getRMApps();
+boolean rmUpdated = false;
+for (int i=0; i<10 && !rmUpdated; i++) {
+  

[05/11] hadoop git commit: YARN-5278. Remove unused argument in TestRMWebServicesForCSWithPartitions#setupQueueConfiguration. Contributed by Tao Jie.

2016-06-23 Thread arp
YARN-5278. Remove unused argument in 
TestRMWebServicesForCSWithPartitions#setupQueueConfiguration. Contributed by 
Tao Jie.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/a8a48c91
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/a8a48c91
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/a8a48c91

Branch: refs/heads/HDFS-1312
Commit: a8a48c91250c1562ea510149fc84876e8d3a5610
Parents: 0164302
Author: Akira Ajisaka 
Authored: Thu Jun 23 14:28:12 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 23 14:28:12 2016 +0900

--
 .../webapp/TestRMWebServicesForCSWithPartitions.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/a8a48c91/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java
--
diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java
 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java
index 046cf8a..29a38d9 100644
--- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java
+++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesForCSWithPartitions.java
@@ -87,7 +87,7 @@ public class TestRMWebServicesForCSWithPartitions extends JerseyTestBase {
   bind(RMWebServices.class);
   bind(GenericExceptionHandler.class);
   csConf = new CapacitySchedulerConfiguration();
-  setupQueueConfiguration(csConf, rm);
+  setupQueueConfiguration(csConf);
   conf = new YarnConfiguration(csConf);
   conf.setClass(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class,
   ResourceScheduler.class);
@@ -113,7 +113,7 @@ public class TestRMWebServicesForCSWithPartitions extends JerseyTestBase {
   }
 
   private static void setupQueueConfiguration(
-  CapacitySchedulerConfiguration config, ResourceManager resourceManager) {
+  CapacitySchedulerConfiguration config) {
 
 // Define top-level queues
 config.setQueues(CapacitySchedulerConfiguration.ROOT,


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[08/11] hadoop git commit: addendum commit to close HADOOP-9613's PR. (ozawa)

2016-06-23 Thread arp
addendum commit to close HADOOP-9613's PR. (ozawa)

This closes #76


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42d53e80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42d53e80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42d53e80

Branch: refs/heads/HDFS-1312
Commit: 42d53e806ecaa6bacf5218c401ecf32b079ea334
Parents: e6cb075
Author: Tsuyoshi Ozawa 
Authored: Thu Jun 23 05:54:39 2016 -0700
Committer: Tsuyoshi Ozawa 
Committed: Thu Jun 23 05:56:25 2016 -0700

--

--



-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[11/11] hadoop git commit: HDFS-10460. Recompute block checksum for a particular range less than file size on the fly by reconstructing missed block. Contributed by Rakesh R

2016-06-23 Thread arp
HDFS-10460. Recompute block checksum for a particular range less than file size 
on the fly by reconstructing missed block. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6cb0752
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6cb0752
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6cb0752

Branch: refs/heads/HDFS-1312
Commit: e6cb07520f935efde3e881de8f84ee7f6e0a746f
Parents: ff07b10
Author: Kai Zheng 
Authored: Fri Jun 24 17:39:32 2016 +0800
Committer: Kai Zheng 
Committed: Fri Jun 24 17:39:32 2016 +0800

--
 .../apache/hadoop/hdfs/FileChecksumHelper.java  |  13 +-
 .../datatransfer/DataTransferProtocol.java  |   5 +-
 .../hdfs/protocol/datatransfer/Sender.java  |   4 +-
 .../src/main/proto/datatransfer.proto   |   1 +
 .../hdfs/protocol/datatransfer/Receiver.java|   3 +-
 .../server/datanode/BlockChecksumHelper.java|  81 +++--
 .../hdfs/server/datanode/DataXceiver.java   |   5 +-
 .../StripedBlockChecksumReconstructor.java  |  69 -
 .../apache/hadoop/hdfs/TestFileChecksum.java| 309 +--
 9 files changed, 415 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6cb0752/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
index c213fa3..fe462f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
@@ -454,10 +454,11 @@ final class FileChecksumHelper {
 private boolean checksumBlockGroup(
 LocatedStripedBlock blockGroup) throws IOException {
   ExtendedBlock block = blockGroup.getBlock();
+  long requestedNumBytes = block.getNumBytes();
   if (getRemaining() < block.getNumBytes()) {
-block.setNumBytes(getRemaining());
+requestedNumBytes = getRemaining();
   }
-  setRemaining(getRemaining() - block.getNumBytes());
+  setRemaining(getRemaining() - requestedNumBytes);
 
   StripedBlockInfo stripedBlockInfo = new StripedBlockInfo(block,
   blockGroup.getLocations(), blockGroup.getBlockTokens(),
@@ -468,7 +469,8 @@ final class FileChecksumHelper {
   boolean done = false;
   for (int j = 0; !done && j < datanodes.length; j++) {
 try {
-  tryDatanode(blockGroup, stripedBlockInfo, datanodes[j]);
+  tryDatanode(blockGroup, stripedBlockInfo, datanodes[j],
+  requestedNumBytes);
   done = true;
 } catch (InvalidBlockTokenException ibte) {
   if (bgIdx > getLastRetriedIndex()) {
@@ -496,7 +498,8 @@ final class FileChecksumHelper {
  */
 private void tryDatanode(LocatedStripedBlock blockGroup,
  StripedBlockInfo stripedBlockInfo,
- DatanodeInfo datanode) throws IOException {
+ DatanodeInfo datanode,
+ long requestedNumBytes) throws IOException {
 
   try (IOStreamPair pair = getClient().connectToDN(datanode,
   getTimeout(), blockGroup.getBlockToken())) {
@@ -506,7 +509,7 @@ final class FileChecksumHelper {
 
 // get block MD5
 createSender(pair).blockGroupChecksum(stripedBlockInfo,
-blockGroup.getBlockToken());
+blockGroup.getBlockToken(), requestedNumBytes);
 
 BlockOpResponseProto reply = BlockOpResponseProto.parseFrom(
 PBHelperClient.vintPrefixed(pair.in));

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6cb0752/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
index ad3f2ad..94f8906 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
@@ -207,8 +207,11 @@ public interface DataTransferProtocol {
*

[04/11] hadoop git commit: HDFS-10555: Unable to loadFSEdits due to a failure in readCachePoolInfo. Contributed by Uma Maheswara Rao G

2016-06-23 Thread arp
HDFS-10555: Unable to loadFSEdits due to a failure in readCachePoolInfo. 
Contributed by Uma Maheswara Rao G


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/01643020
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/01643020
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/01643020

Branch: refs/heads/HDFS-1312
Commit: 01643020d24b606c26cb2dcaefdb6e47a5f06c8f
Parents: 6ab5aa1
Author: Uma Maheswara Rao G 
Authored: Wed Jun 22 22:06:13 2016 -0700
Committer: Uma Maheswara Rao G 
Committed: Wed Jun 22 22:06:13 2016 -0700

--
 .../apache/hadoop/hdfs/server/namenode/FSImageSerialization.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/01643020/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index 06ac6a7..5f6c96a 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -683,7 +683,7 @@ public class FSImageSerialization {
 if ((flags & 0x20) != 0) {
   info.setDefaultReplication(readShort(in));
 }
-if ((flags & ~0x2F) != 0) {
+if ((flags & ~0x3F) != 0) {
   throw new IOException("Unknown flag in CachePoolInfo: " + flags);
 }
 return info;
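
The mask follows from the flag bits: 0x2F is binary 101111, which covers
0x01, 0x02, 0x04, 0x08 and 0x20 but omits 0x10, so a serialized pool carrying
the 0x10 flag tripped the unknown-flag check. With the 0x20
(defaultReplication) bit included, the full set of known flags is 0x3F. A
restatement of the arithmetic (the meaning of bits other than 0x20 is not
visible in this hunk):

    // Known flags: 0x01 | 0x02 | 0x04 | 0x08 | 0x10 | 0x20 == 0x3F.
    // Anything outside that set is an unknown flag.
    boolean unknown = (flags & ~0x3F) != 0;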


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[06/11] hadoop git commit: HADOOP-13307. add rsync to Dockerfile so that precommit archive works. Contributed by Allen Wittenauer.

2016-06-23 Thread arp
HADOOP-13307. add rsync to Dockerfile so that precommit archive works. 
Contributed by Allen Wittenauer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85209cc5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85209cc5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85209cc5

Branch: refs/heads/HDFS-1312
Commit: 85209cc59152bcd7070b0a654af7d64105d36a6b
Parents: a8a48c9
Author: Akira Ajisaka 
Authored: Thu Jun 23 15:11:58 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 23 15:11:58 2016 +0900

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85209cc5/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 82edc86..38dbc8a 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -59,6 +59,7 @@ RUN apt-get -q update && apt-get -q install --no-install-recommends -y \
 python \
 python2.7 \
 python-pip \
+rsync \
 snappy \
 zlib1g-dev
 


-
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org



[02/11] hadoop git commit: MAPREDUCE-6641. TestTaskAttempt fails in trunk. Contributed by Haibo Chen.

2016-06-23 Thread arp
MAPREDUCE-6641. TestTaskAttempt fails in trunk. Contributed by Haibo Chen.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/69c21d2d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/69c21d2d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/69c21d2d

Branch: refs/heads/HDFS-1312
Commit: 69c21d2db099b3a4bb612fe2876fb448471829b3
Parents: 99e5dd6
Author: Akira Ajisaka 
Authored: Thu Jun 23 13:25:25 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 23 13:26:10 2016 +0900

--
 .../hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java   | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/69c21d2d/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
--
diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
index 1af60d8..61b780e 100644
--- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
+++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.mapreduce.v2.app.job.impl;
 
+import static org.apache.hadoop.test.GenericTestUtils.waitFor;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
@@ -391,8 +392,8 @@ public class TestTaskAttempt{
 Map<TaskAttemptId, TaskAttempt> attempts = task.getAttempts();
 TaskAttempt attempt = attempts.values().iterator().next();
 app.waitForState(attempt, TaskAttemptState.KILLED);
-Assert.assertTrue("No Ta Started JH Event", app.getTaStartJHEvent());
-Assert.assertTrue("No Ta Killed JH Event", app.getTaKilledJHEvent());
+waitFor(app::getTaStartJHEvent, 100, 800);
+waitFor(app::getTaKilledJHEvent, 100, 800);
   }
 
   static class FailingAttemptsMRApp extends MRApp {
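
The substance of this fix is the swap above from one-shot assertions to polling: the JobHistory events arrive asynchronously, so asserting immediately after waitForState races with the event handler. Below is a minimal stand-alone equivalent of the waitFor(condition, checkEveryMillis, waitForMillis) helper imported from GenericTestUtils, as a sketch only (the real implementation lives in org.apache.hadoop.test.GenericTestUtils and uses its own Supplier type):

import java.util.concurrent.TimeoutException;
import java.util.function.Supplier;

public final class WaitForSketch {
  /** Poll check every checkEveryMillis until it returns true,
   *  failing with TimeoutException after waitForMillis. */
  public static void waitFor(Supplier<Boolean> check,
                             long checkEveryMillis,
                             long waitForMillis)
      throws TimeoutException, InterruptedException {
    long deadline = System.currentTimeMillis() + waitForMillis;
    while (System.currentTimeMillis() < deadline) {
      if (check.get()) {
        return;                        // condition met; the test proceeds
      }
      Thread.sleep(checkEveryMillis);  // back off before re-checking
    }
    throw new TimeoutException("condition not met in " + waitForMillis + "ms");
  }
}

With waitFor(app::getTaStartJHEvent, 100, 800) the test tolerates up to 800 ms of event-delivery latency instead of failing on the first scheduling hiccup.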





[07/11] hadoop git commit: HDFS-10561. test_native_mini_dfs fails by NoClassDefFoundError. (aajisaka)

2016-06-23 Thread arp
HDFS-10561. test_native_mini_dfs fails by NoClassDefFoundError. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff07b108
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff07b108
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff07b108

Branch: refs/heads/HDFS-1312
Commit: ff07b108039ecc4e680352f756930673ced42256
Parents: 85209cc
Author: Akira Ajisaka 
Authored: Thu Jun 23 15:47:31 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 23 15:48:18 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff07b108/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 23fefcb..36fdb35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -68,6 +68,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
   
 
   


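One note on the failure mode in the subject line: JVM class linkage is lazy, so a jar that was present at compile time but absent from the runtime classpath fails only when the referencing code path first executes, and it fails with NoClassDefFoundError rather than a compile error. A tiny hypothetical demo (compile with junit on the classpath, then run without it):

public class LazyLinkDemo {
  public static void main(String[] args) {
    System.out.println("before the first junit reference");
    // The JVM resolves org.junit.Assert here, at first use. Run without the
    // junit jar on the classpath and this line throws
    // java.lang.NoClassDefFoundError: org/junit/Assert.
    org.junit.Assert.assertTrue(true);
    System.out.println("junit linked successfully");
  }
}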



[10/11] hadoop git commit: Merge branch 'trunk' into HDFS-1312

2016-06-23 Thread arp
Merge branch 'trunk' into HDFS-1312


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ea8bfc37
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ea8bfc37
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ea8bfc37

Branch: refs/heads/HDFS-1312
Commit: ea8bfc3704d174b31565441193d0b75bdef607c4
Parents: 7034dcf e98c0c7
Author: Arpit Agarwal 
Authored: Thu Jun 23 11:31:10 2016 -0700
Committer: Arpit Agarwal 
Committed: Thu Jun 23 11:31:10 2016 -0700

--
 dev-support/docker/Dockerfile   |   1 +
 .../DelegationTokenAuthenticationHandler.java   |   7 -
 .../web/DelegationTokenAuthenticator.java   |  19 --
 .../delegation/web/TestWebDelegationToken.java  | 114 +--
 .../apache/hadoop/hdfs/FileChecksumHelper.java  |  13 +-
 .../datatransfer/DataTransferProtocol.java  |   5 +-
 .../hdfs/protocol/datatransfer/Sender.java  |   4 +-
 .../src/main/proto/datatransfer.proto   |   1 +
 .../hadoop-hdfs-native-client/pom.xml   |   5 +
 .../hdfs/protocol/datatransfer/Receiver.java|   3 +-
 .../server/datanode/BlockChecksumHelper.java|  81 +++--
 .../hdfs/server/datanode/DataXceiver.java   |   5 +-
 .../StripedBlockChecksumReconstructor.java  |  69 -
 .../server/namenode/FSImageSerialization.java   |   2 +-
 .../apache/hadoop/hdfs/TestFileChecksum.java| 309 +--
 .../v2/app/job/impl/TestTaskAttempt.java|   5 +-
 .../apache/hadoop/yarn/client/cli/LogsCLI.java  |  62 ++--
 .../api/impl/TestDistributedScheduling.java | 123 +++-
 .../hadoop/yarn/client/cli/TestLogsCLI.java |  24 +-
 .../yarn/api/records/impl/pb/ProtoUtils.java|  14 +
 .../yarn/logaggregation/LogCLIHelpers.java  |  12 +-
 .../api/DistributedSchedulerProtocol.java   |   4 +-
 ...istributedSchedulerProtocolPBClientImpl.java |  17 +-
 ...stributedSchedulerProtocolPBServiceImpl.java |   7 +-
 .../DistSchedAllocateRequest.java   |  69 +
 .../impl/pb/DistSchedAllocateRequestPBImpl.java | 185 +++
 .../proto/distributed_scheduler_protocol.proto  |   2 +-
 .../yarn_server_common_service_protos.proto |   5 +
 .../amrmproxy/AbstractRequestInterceptor.java   |  12 +-
 .../amrmproxy/DefaultRequestInterceptor.java|  11 +-
 .../nodemanager/scheduler/LocalScheduler.java   |  34 +-
 .../scheduler/TestLocalScheduler.java   |   3 +-
 .../DistributedSchedulingService.java   |  29 +-
 .../rmcontainer/RMContainer.java|  11 +
 .../rmcontainer/RMContainerImpl.java|  31 ++
 .../scheduler/AbstractYarnScheduler.java|  18 +-
 .../scheduler/SchedulerApplicationAttempt.java  |  20 ++
 .../TestDistributedSchedulingService.java   |  24 +-
 .../TestRMWebServicesForCSWithPartitions.java   |   4 +-
 39 files changed, 1061 insertions(+), 303 deletions(-)
--






hadoop git commit: Revert "HADOOP-13228. Add delegation token to the connection in DelegationTokenAuthenticator. Contributed by Xiao Chen."

2016-06-23 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 5c07aee0b -> f28c270c9


Revert "HADOOP-13228. Add delegation token to the connection in 
DelegationTokenAuthenticator. Contributed by Xiao Chen."

This reverts commit 4ad9ca85fbde69369264f089c0195f90232dc3bd.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/f28c270c
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/f28c270c
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/f28c270c

Branch: refs/heads/branch-2.8
Commit: f28c270c96564ea828b87e6a811ee2ee57556bb2
Parents: 5c07aee
Author: Andrew Wang 
Authored: Thu Jun 23 11:09:06 2016 -0700
Committer: Andrew Wang 
Committed: Thu Jun 23 11:09:06 2016 -0700

--
 .../DelegationTokenAuthenticationHandler.java   |   7 --
 .../web/DelegationTokenAuthenticator.java   |  19 
 .../delegation/web/TestWebDelegationToken.java  | 114 +--
 3 files changed, 3 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/f28c270c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index 95a849f..3f191de 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -51,8 +51,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
@@ -80,9 +78,6 @@ import org.slf4j.LoggerFactory;
 public abstract class DelegationTokenAuthenticationHandler
 implements AuthenticationHandler {
 
-  private static final Logger LOG =
-  LoggerFactory.getLogger(DelegationTokenAuthenticationHandler.class);
-
   protected static final String TYPE_POSTFIX = "-dt";
 
   public static final String PREFIX = "delegation-token.";
@@ -332,8 +327,6 @@ public abstract class DelegationTokenAuthenticationHandler
   throws IOException, AuthenticationException {
 AuthenticationToken token;
 String delegationParam = getDelegationToken(request);
-LOG.debug("Authenticating with delegationParam: {}, query string: {}",
-delegationParam, request.getQueryString());
 if (delegationParam != null) {
   try {
 Token dt = new Token();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/f28c270c/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index 53978a6..37ae601 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -122,24 +122,6 @@ public abstract class DelegationTokenAuthenticator 
implements Authenticator {
 return hasDt;
   }
 
-  /**
-   * Append the delegation token to the request header if needed.
-   */
-  private void appendDelegationToken(final AuthenticatedURL.Token token,
-  final Token dToken, final HttpURLConnection conn) throws IOException {
-if (token.isSet()) {
-  LOG.debug("Auth token is set, not appending delegation token.");
-  return;
-}
-if (dToken == null) {
-  LOG.warn("Delegation token is null, cannot set on request header.");
-  return;
-}
-conn.setRequestProperty(
-DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
-dToken.encodeToUrlString());
-  }
-
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
   throws IOException, AuthenticationException {
@@ -304,7 +286,6 @@ public abstract class 
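
For context, the appendDelegationToken helper removed above attached the encoded delegation token as a request header whenever no auth token was already set. A simplified stand-alone sketch of that pattern (the class and the header value are illustrative stand-ins; the real constant is DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER):

import java.net.HttpURLConnection;

final class DelegationHeaderSketch {
  // Assumed header name, for illustration only.
  static final String DELEGATION_TOKEN_HEADER = "X-Hadoop-Delegation-Token";

  /** Mirror of the removed logic: skip if an auth token already covers the
   *  request or there is no delegation token to send; otherwise set the
   *  encoded token as a request header. */
  static void appendDelegationToken(boolean authTokenIsSet,
                                    String encodedDelegationToken,
                                    HttpURLConnection conn) {
    if (authTokenIsSet || encodedDelegationToken == null) {
      return;
    }
    conn.setRequestProperty(DELEGATION_TOKEN_HEADER, encodedDelegationToken);
  }
}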

hadoop git commit: Revert "HADOOP-13228. Add delegation token to the connection in DelegationTokenAuthenticator. Contributed by Xiao Chen."

2016-06-23 Thread wang
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 742f9083f -> 8a1ee4d2b


Revert "HADOOP-13228. Add delegation token to the connection in 
DelegationTokenAuthenticator. Contributed by Xiao Chen."

This reverts commit 55eda958ccbf3941986d6da63ad7ee16646d7067.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/8a1ee4d2
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/8a1ee4d2
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/8a1ee4d2

Branch: refs/heads/branch-2
Commit: 8a1ee4d2b5c49c6bfaa47f12e568b863efa2203c
Parents: 742f908
Author: Andrew Wang 
Authored: Thu Jun 23 11:08:52 2016 -0700
Committer: Andrew Wang 
Committed: Thu Jun 23 11:08:52 2016 -0700

--
 .../DelegationTokenAuthenticationHandler.java   |   7 --
 .../web/DelegationTokenAuthenticator.java   |  19 
 .../delegation/web/TestWebDelegationToken.java  | 114 +--
 3 files changed, 3 insertions(+), 137 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a1ee4d2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
index 95a849f..3f191de 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticationHandler.java
@@ -51,8 +51,6 @@ import org.apache.hadoop.util.StringUtils;
 import org.codehaus.jackson.map.ObjectMapper;
 
 import com.google.common.annotations.VisibleForTesting;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 /**
  * An {@link AuthenticationHandler} that implements Kerberos SPNEGO mechanism
@@ -80,9 +78,6 @@ import org.slf4j.LoggerFactory;
 public abstract class DelegationTokenAuthenticationHandler
 implements AuthenticationHandler {
 
-  private static final Logger LOG =
-  LoggerFactory.getLogger(DelegationTokenAuthenticationHandler.class);
-
   protected static final String TYPE_POSTFIX = "-dt";
 
   public static final String PREFIX = "delegation-token.";
@@ -332,8 +327,6 @@ public abstract class DelegationTokenAuthenticationHandler
   throws IOException, AuthenticationException {
 AuthenticationToken token;
 String delegationParam = getDelegationToken(request);
-LOG.debug("Authenticating with delegationParam: {}, query string: {}",
-delegationParam, request.getQueryString());
 if (delegationParam != null) {
   try {
 Token dt = new Token();

http://git-wip-us.apache.org/repos/asf/hadoop/blob/8a1ee4d2/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
--
diff --git 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
index 53978a6..37ae601 100644
--- 
a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
+++ 
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/web/DelegationTokenAuthenticator.java
@@ -122,24 +122,6 @@ public abstract class DelegationTokenAuthenticator 
implements Authenticator {
 return hasDt;
   }
 
-  /**
-   * Append the delegation token to the request header if needed.
-   */
-  private void appendDelegationToken(final AuthenticatedURL.Token token,
-  final Token dToken, final HttpURLConnection conn) throws IOException {
-if (token.isSet()) {
-  LOG.debug("Auth token is set, not appending delegation token.");
-  return;
-}
-if (dToken == null) {
-  LOG.warn("Delegation token is null, cannot set on request header.");
-  return;
-}
-conn.setRequestProperty(
-DelegationTokenAuthenticator.DELEGATION_TOKEN_HEADER,
-dToken.encodeToUrlString());
-  }
-
   @Override
   public void authenticate(URL url, AuthenticatedURL.Token token)
   throws IOException, AuthenticationException {
@@ -304,7 +286,6 @@ public abstract class 

[Hadoop Wiki] Update of "HowToCommit" by AkiraAjisaka

2016-06-23 Thread Apache Wiki
Dear Wiki user,

You have subscribed to a wiki page or wiki category on "Hadoop Wiki" for change 
notification.

The "HowToCommit" page has been changed by AkiraAjisaka:
https://wiki.apache.org/hadoop/HowToCommit?action=diff&rev1=36&rev2=37

Comment:
Committers need to update CHANGES.txt when backporting to branch-2.7 or older branches.

   1. '''Push changes to remote repo:''' Build and run a test to ensure it is all still kosher. Push the changes to the remote (main) repo using {{{git push <remote> <branch>}}}.
   1. '''Backporting to other branches:''' If the changes were to trunk, we might want to apply them to other appropriate branches.
  1. Cherry-pick the changes to other appropriate branches via {{{git cherry-pick -x <commit-hash>}}}. The -x option records the source commit, and reuses the original commit message. Resolve any conflicts.
- 1. If the conflicts are major, it is preferable to produce a new patch for that branch, review it separately and commit it. When committing an edited patch to other branches, please follow the same steps and make sure to include the JIRA number and description of changes in the commit message. 
+ 1. If the conflicts are major, it is preferable to produce a new patch for that branch, review it separately and commit it. When committing an edited patch to other branches, please follow the same steps and make sure to include the JIRA number and description of changes in the commit message.
+ 1. When backporting to branch-2.7 or older branches, we need to update CHANGES.txt.
   1. Resolve the issue as fixed, thanking the contributor.  Always set the "Fix Version" at this point, but please only set a single fix version, the earliest release in which the change will appear. '''Special case'''- when committing to a ''non-mainline'' branch (such as branch-0.22 or branch-0.23 ATM), please set fix-version to either 2.x.x or 3.x.x appropriately too.
   1. Set the assignee if it is not set. If you cannot set the contributor to the assignee, you need to add the contributor into Contributors role in the project. Please see [[#Roles|Adding Contributors role]] for the detail.
  




hadoop git commit: HDFS-10555: Unable to loadFSEdits due to a failure in readCachePoolInfo. Contributed by Uma Maheswara Rao G

2016-06-23 Thread kihwal
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 96dca785f -> 742f9083f


HDFS-10555: Unable to loadFSEdits due to a failure in readCachePoolInfo. 
Contributed by Uma Maheswara Rao G

(cherry picked from commit 01643020d24b606c26cb2dcaefdb6e47a5f06c8f)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/742f9083
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/742f9083
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/742f9083

Branch: refs/heads/branch-2
Commit: 742f9083f695c0cc7b21020f41d9da0e72da04f6
Parents: 96dca78
Author: Kihwal Lee 
Authored: Thu Jun 23 09:19:49 2016 -0500
Committer: Kihwal Lee 
Committed: Thu Jun 23 09:19:49 2016 -0500

--
 .../apache/hadoop/hdfs/server/namenode/FSImageSerialization.java   | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/742f9083/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
index c584628..8df65f7 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageSerialization.java
@@ -681,7 +681,7 @@ public class FSImageSerialization {
 if ((flags & 0x20) != 0) {
   info.setDefaultReplication(readShort(in));
 }
-if ((flags & ~0x2F) != 0) {
+if ((flags & ~0x3F) != 0) {
   throw new IOException("Unknown flag in CachePoolInfo: " + flags);
 }
 return info;





hadoop git commit: addendum commit to close HADOOP-9613's PR. (ozawa)

2016-06-23 Thread ozawa
Repository: hadoop
Updated Branches:
  refs/heads/trunk e6cb07520 -> 42d53e806


addendum commit to close HADOOP-9613's PR. (ozawa)

This closes #76


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/42d53e80
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/42d53e80
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/42d53e80

Branch: refs/heads/trunk
Commit: 42d53e806ecaa6bacf5218c401ecf32b079ea334
Parents: e6cb075
Author: Tsuyoshi Ozawa 
Authored: Thu Jun 23 05:54:39 2016 -0700
Committer: Tsuyoshi Ozawa 
Committed: Thu Jun 23 05:56:25 2016 -0700

--

--






hadoop git commit: HDFS-10460. Recompute block checksum for a particular range less than file size on the fly by reconstructing missed block. Contributed by Rakesh R

2016-06-23 Thread drankye
Repository: hadoop
Updated Branches:
  refs/heads/trunk ff07b1080 -> e6cb07520


HDFS-10460. Recompute block checksum for a particular range less than file size 
on the fly by reconstructing missed block. Contributed by Rakesh R


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/e6cb0752
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/e6cb0752
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/e6cb0752

Branch: refs/heads/trunk
Commit: e6cb07520f935efde3e881de8f84ee7f6e0a746f
Parents: ff07b10
Author: Kai Zheng 
Authored: Fri Jun 24 17:39:32 2016 +0800
Committer: Kai Zheng 
Committed: Fri Jun 24 17:39:32 2016 +0800

--
 .../apache/hadoop/hdfs/FileChecksumHelper.java  |  13 +-
 .../datatransfer/DataTransferProtocol.java  |   5 +-
 .../hdfs/protocol/datatransfer/Sender.java  |   4 +-
 .../src/main/proto/datatransfer.proto   |   1 +
 .../hdfs/protocol/datatransfer/Receiver.java|   3 +-
 .../server/datanode/BlockChecksumHelper.java|  81 +++--
 .../hdfs/server/datanode/DataXceiver.java   |   5 +-
 .../StripedBlockChecksumReconstructor.java  |  69 -
 .../apache/hadoop/hdfs/TestFileChecksum.java| 309 +--
 9 files changed, 415 insertions(+), 75 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6cb0752/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
index c213fa3..fe462f2 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
+++ 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/FileChecksumHelper.java
@@ -454,10 +454,11 @@ final class FileChecksumHelper {
 private boolean checksumBlockGroup(
 LocatedStripedBlock blockGroup) throws IOException {
   ExtendedBlock block = blockGroup.getBlock();
+  long requestedNumBytes = block.getNumBytes();
   if (getRemaining() < block.getNumBytes()) {
-block.setNumBytes(getRemaining());
+requestedNumBytes = getRemaining();
   }
-  setRemaining(getRemaining() - block.getNumBytes());
+  setRemaining(getRemaining() - requestedNumBytes);
 
   StripedBlockInfo stripedBlockInfo = new StripedBlockInfo(block,
   blockGroup.getLocations(), blockGroup.getBlockTokens(),
@@ -468,7 +469,8 @@ final class FileChecksumHelper {
   boolean done = false;
   for (int j = 0; !done && j < datanodes.length; j++) {
 try {
-  tryDatanode(blockGroup, stripedBlockInfo, datanodes[j]);
+  tryDatanode(blockGroup, stripedBlockInfo, datanodes[j],
+  requestedNumBytes);
   done = true;
 } catch (InvalidBlockTokenException ibte) {
   if (bgIdx > getLastRetriedIndex()) {
@@ -496,7 +498,8 @@ final class FileChecksumHelper {
  */
 private void tryDatanode(LocatedStripedBlock blockGroup,
  StripedBlockInfo stripedBlockInfo,
- DatanodeInfo datanode) throws IOException {
+ DatanodeInfo datanode,
+ long requestedNumBytes) throws IOException {
 
   try (IOStreamPair pair = getClient().connectToDN(datanode,
   getTimeout(), blockGroup.getBlockToken())) {
@@ -506,7 +509,7 @@ final class FileChecksumHelper {
 
 // get block MD5
 createSender(pair).blockGroupChecksum(stripedBlockInfo,
-blockGroup.getBlockToken());
+blockGroup.getBlockToken(), requestedNumBytes);
 
 BlockOpResponseProto reply = BlockOpResponseProto.parseFrom(
 PBHelperClient.vintPrefixed(pair.in));
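
The FileChecksumHelper hunks above share one idea: the old code truncated the shared ExtendedBlock via block.setNumBytes(getRemaining()), a mutation that leaked into subsequent datanode retries, while the new code derives the requested length into a local and threads it explicitly through tryDatanode to blockGroupChecksum. A hedged sketch of that refactor in isolation (names are illustrative, not the HDFS API):

// Illustrative only: a mutable block stand-in and the two clamping styles.
final class RangeClampSketch {
  static final class MutableBlock {
    long numBytes;
    MutableBlock(long numBytes) { this.numBytes = numBytes; }
  }

  // Before: clamp by mutating the shared block; every later reader of
  // block (including retries against other datanodes) sees the new size.
  static long requestLengthByMutation(MutableBlock block, long remaining) {
    if (remaining < block.numBytes) {
      block.numBytes = remaining;
    }
    return block.numBytes;
  }

  // After (the pattern in the patch): compute the length locally and pass
  // it as an explicit parameter; the block object is left untouched.
  static long requestLength(MutableBlock block, long remaining) {
    return Math.min(block.numBytes, remaining);
  }
}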

http://git-wip-us.apache.org/repos/asf/hadoop/blob/e6cb0752/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
--
diff --git 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
 
b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
index ad3f2ad..94f8906 100644
--- 
a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java
+++ 

hadoop git commit: HDFS-10561. test_native_mini_dfs fails by NoClassDefFoundError. (aajisaka)

2016-06-23 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 fb13ab072 -> 96dca785f


HDFS-10561. test_native_mini_dfs fails by NoClassDefFoundError. (aajisaka)

(cherry picked from commit ff07b108039ecc4e680352f756930673ced42256)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/96dca785
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/96dca785
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/96dca785

Branch: refs/heads/branch-2
Commit: 96dca785fa17287fb3e558b279e26c5f3388dab8
Parents: fb13ab0
Author: Akira Ajisaka 
Authored: Thu Jun 23 15:47:31 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 23 15:49:55 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/96dca785/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 172cd4e..be82010 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -68,6 +68,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
   
 
   





hadoop git commit: HDFS-10561. test_native_mini_dfs fails by NoClassDefFoundError. (aajisaka)

2016-06-23 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk 85209cc59 -> ff07b1080


HDFS-10561. test_native_mini_dfs fails by NoClassDefFoundError. (aajisaka)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/ff07b108
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/ff07b108
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/ff07b108

Branch: refs/heads/trunk
Commit: ff07b108039ecc4e680352f756930673ced42256
Parents: 85209cc
Author: Akira Ajisaka 
Authored: Thu Jun 23 15:47:31 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 23 15:48:18 2016 +0900

--
 hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml | 5 +
 1 file changed, 5 insertions(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/ff07b108/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
--
diff --git a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml 
b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
index 23fefcb..36fdb35 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-native-client/pom.xml
@@ -68,6 +68,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>mockito-all</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
   
 
   





hadoop git commit: HADOOP-13307. add rsync to Dockerfile so that precommit archive works. Contributed by Allen Wittenauer.

2016-06-23 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d4474d7d0 -> 5c07aee0b


HADOOP-13307. add rsync to Dockerfile so that precommit archive works. 
Contributed by Allen Wittenauer.

(cherry picked from commit 85209cc59152bcd7070b0a654af7d64105d36a6b)
(cherry picked from commit fb13ab07270067062998381b238ba9def87aa304)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5c07aee0
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5c07aee0
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5c07aee0

Branch: refs/heads/branch-2.8
Commit: 5c07aee0b014e9ceb5aeb6f451402308596c1afb
Parents: d4474d7
Author: Akira Ajisaka 
Authored: Thu Jun 23 15:11:58 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 23 15:24:22 2016 +0900

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/5c07aee0/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 1fbb09b..226487c 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -57,6 +57,7 @@ RUN apt-get update && apt-get install --no-install-recommends 
-y \
 python \
 python2.7 \
 pylint \
+rsync \
 snappy \
 zlib1g-dev
 





hadoop git commit: HADOOP-13307. add rsync to Dockerfile so that precommit archive works. Contributed by Allen Wittenauer.

2016-06-23 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/trunk a8a48c912 -> 85209cc59


HADOOP-13307. add rsync to Dockerfile so that precommit archive works. 
Contributed by Allen Wittenauer.


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/85209cc5
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/85209cc5
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/85209cc5

Branch: refs/heads/trunk
Commit: 85209cc59152bcd7070b0a654af7d64105d36a6b
Parents: a8a48c9
Author: Akira Ajisaka 
Authored: Thu Jun 23 15:11:58 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 23 15:11:58 2016 +0900

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/85209cc5/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 82edc86..38dbc8a 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -59,6 +59,7 @@ RUN apt-get -q update && apt-get -q install 
--no-install-recommends -y \
 python \
 python2.7 \
 python-pip \
+rsync \
 snappy \
 zlib1g-dev
 





hadoop git commit: HADOOP-13307. add rsync to Dockerfile so that precommit archive works. Contributed by Allen Wittenauer.

2016-06-23 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 2a956baaa -> fb13ab072


HADOOP-13307. add rsync to Dockerfile so that precommit archive works. 
Contributed by Allen Wittenauer.

(cherry picked from commit 85209cc59152bcd7070b0a654af7d64105d36a6b)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/fb13ab07
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/fb13ab07
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/fb13ab07

Branch: refs/heads/branch-2
Commit: fb13ab07270067062998381b238ba9def87aa304
Parents: 2a956ba
Author: Akira Ajisaka 
Authored: Thu Jun 23 15:11:58 2016 +0900
Committer: Akira Ajisaka 
Committed: Thu Jun 23 15:24:10 2016 +0900

--
 dev-support/docker/Dockerfile | 1 +
 1 file changed, 1 insertion(+)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/fb13ab07/dev-support/docker/Dockerfile
--
diff --git a/dev-support/docker/Dockerfile b/dev-support/docker/Dockerfile
index 1fbb09b..226487c 100644
--- a/dev-support/docker/Dockerfile
+++ b/dev-support/docker/Dockerfile
@@ -57,6 +57,7 @@ RUN apt-get update && apt-get install --no-install-recommends 
-y \
 python \
 python2.7 \
 pylint \
+rsync \
 snappy \
 zlib1g-dev
 





hadoop git commit: HADOOP-13245. Fix up some misc create-release issues (aw)

2016-06-23 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2.8 d17265f46 -> d4474d7d0


HADOOP-13245. Fix up some misc create-release issues (aw)

(cherry picked from commit e2f640942b722e35490cf146c0268517da5a28b1)
(cherry picked from commit 2a956baaabe4a86a63fc181c3a07b2f4c4ee2a75)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/d4474d7d
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/d4474d7d
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/d4474d7d

Branch: refs/heads/branch-2.8
Commit: d4474d7d094616851e011e58e4d81210f601cc26
Parents: d17265f
Author: Allen Wittenauer 
Authored: Tue Jun 14 16:03:20 2016 -0700
Committer: Akira Ajisaka 
Committed: Thu Jun 23 15:16:33 2016 +0900

--
 dev-support/bin/create-release  | 69 ++--
 dev-support/docker/Dockerfile   |  5 ++
 hadoop-common-project/hadoop-common/pom.xml |  3 +-
 pom.xml |  2 +
 4 files changed, 62 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/d4474d7d/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 8d7f0a8..422ff49 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -240,6 +240,8 @@ function set_defaults
   OSNAME=$(uname -s)
 
  PUBKEYFILE="https://dist.apache.org/repos/dist/release/hadoop/common/KEYS"
+
+  SIGN=false
 }
 
 function startgpgagent
@@ -247,11 +249,23 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
 if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
   echo "starting gpg agent"
-  touch "${LOGDIR}/gpgagent.conf"
+  echo "default-cache-ttl 7200" > "${LOGDIR}/gpgagent.conf"
+  # shellcheck disable=2046
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \
---log-file=${LOGDIR}/create-release-gpgagent.log)
-  GPGAGENTPID=$(echo ${GPG_AGENT_INFO} | cut -f 2 -d:)
+--log-file="${LOGDIR}/create-release-gpgagent.log")
+  GPGAGENTPID=$(echo "${GPG_AGENT_INFO}" | cut -f 2 -d:)
+fi
+
+if [[ -n "${GPG_AGENT_INFO}" ]]; then
+  echo "Warming the gpg-agent cache prior to calling maven"
+  # warm the agent's cache:
+  touch "${LOGDIR}/warm"
+  ${GPG} --use-agent --armor --output "${LOGDIR}/warm.asc" --detach-sig 
"${LOGDIR}/warm"
+  rm "${LOGDIR}/warm.asc" "${LOGDIR}/warm"
+else
+  SIGN=false
+  hadoop_error "ERROR: Unable to launch or acquire gpg-agent. Disable 
signing."
 fi
   fi
 }
@@ -259,7 +273,7 @@ function startgpgagent
 function stopgpgagent
 {
   if [[ -n "${GPGAGENTPID}" ]]; then
-kill ${GPGAGENTPID}
+kill "${GPGAGENTPID}"
   fi
 }
 
@@ -273,7 +287,7 @@ function usage
   echo "--mvncache=[path]   Path to the maven cache to use"
   echo "--nativeAlso build the native components"
   echo "--rc-label=[label]  Add this label to the builds"
-  echo "--sign  Use .gnupg dir to sign the jars"
+  echo "--sign  Use .gnupg dir to sign the artifacts and jars"
   echo "--version=[version] Use an alternative version string"
 }
 
@@ -330,6 +344,16 @@ function option_parse
 SIGN=false
   fi
 
+  if [[ "${SIGN}" = true ]]; then
+if [[ -n "${GPG_AGENT_INFO}" ]]; then
+  echo "NOTE: Using existing gpg-agent. If the default-cache-ttl"
+  echo "is set to less than ~20 mins, maven commands will fail."
+elif [[ -z "${GPGAGENT}" ]]; then
+  hadoop_error "ERROR: No gpg-agent. Disabling signing capability."
+  SIGN=false
+fi
+  fi
+
   DOCKERCMD=$(command -v docker)
   if [[ "${DOCKER}" = true && -z "${DOCKERCMD}" ]]; then
   hadoop_error "ERROR: docker binary not found. Disabling docker mode."
@@ -439,6 +463,11 @@ function dockermode
 # make sure we put some space between, just in case last
 # line isn't an empty line or whatever
 printf "\n\n"
+
+# force a new image for every run to make it easier to remove later
+echo "LABEL org.apache.hadoop.create-release=\"cr-${RANDOM}\""
+
+# setup ownerships, etc
 echo "RUN groupadd --non-unique -g ${group_id} ${user_name}"
 echo "RUN useradd -g ${group_id} -u ${user_id} -m ${user_name}"
 echo "RUN chown -R ${user_name} /home/${user_name}"
@@ -490,19 +519,27 @@ function makearelease
 
   big_console_header "Maven Build and Install"
 
+  if [[ "${SIGN}" = true ]]; then
+signflags=("-Psign" "-Dgpg.useagent=true" -Dgpg.executable="${GPG}")
+  fi
+
   # Create SRC and BIN tarballs for release,
-  # Using 'install’ goal instead of 'package' so artifacts are available
-  # in the Maven local cache for the site generation

hadoop git commit: HADOOP-13245. Fix up some misc create-release issues (aw)

2016-06-23 Thread aajisaka
Repository: hadoop
Updated Branches:
  refs/heads/branch-2 b7f18f281 -> 2a956baaa


HADOOP-13245. Fix up some misc create-release issues (aw)

(cherry picked from commit e2f640942b722e35490cf146c0268517da5a28b1)


Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo
Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/2a956baa
Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/2a956baa
Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/2a956baa

Branch: refs/heads/branch-2
Commit: 2a956baaabe4a86a63fc181c3a07b2f4c4ee2a75
Parents: b7f18f2
Author: Allen Wittenauer 
Authored: Tue Jun 14 16:03:20 2016 -0700
Committer: Akira Ajisaka 
Committed: Thu Jun 23 15:15:23 2016 +0900

--
 dev-support/bin/create-release  | 69 ++--
 dev-support/docker/Dockerfile   |  5 ++
 hadoop-common-project/hadoop-common/pom.xml |  3 +-
 pom.xml |  2 +
 4 files changed, 62 insertions(+), 17 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/hadoop/blob/2a956baa/dev-support/bin/create-release
--
diff --git a/dev-support/bin/create-release b/dev-support/bin/create-release
index 8d7f0a8..422ff49 100755
--- a/dev-support/bin/create-release
+++ b/dev-support/bin/create-release
@@ -240,6 +240,8 @@ function set_defaults
   OSNAME=$(uname -s)
 
  PUBKEYFILE="https://dist.apache.org/repos/dist/release/hadoop/common/KEYS"
+
+  SIGN=false
 }
 
 function startgpgagent
@@ -247,11 +249,23 @@ function startgpgagent
   if [[ "${SIGN}" = true ]]; then
 if [[ -n "${GPGAGENT}" && -z "${GPG_AGENT_INFO}" ]]; then
   echo "starting gpg agent"
-  touch "${LOGDIR}/gpgagent.conf"
+  echo "default-cache-ttl 7200" > "${LOGDIR}/gpgagent.conf"
+  # shellcheck disable=2046
   eval $("${GPGAGENT}" --daemon \
 --options "${LOGDIR}/gpgagent.conf" \
---log-file=${LOGDIR}/create-release-gpgagent.log)
-  GPGAGENTPID=$(echo ${GPG_AGENT_INFO} | cut -f 2 -d:)
+--log-file="${LOGDIR}/create-release-gpgagent.log")
+  GPGAGENTPID=$(echo "${GPG_AGENT_INFO}" | cut -f 2 -d:)
+fi
+
+if [[ -n "${GPG_AGENT_INFO}" ]]; then
+  echo "Warming the gpg-agent cache prior to calling maven"
+  # warm the agent's cache:
+  touch "${LOGDIR}/warm"
+  ${GPG} --use-agent --armor --output "${LOGDIR}/warm.asc" --detach-sig 
"${LOGDIR}/warm"
+  rm "${LOGDIR}/warm.asc" "${LOGDIR}/warm"
+else
+  SIGN=false
+  hadoop_error "ERROR: Unable to launch or acquire gpg-agent. Disable 
signing."
 fi
   fi
 }
@@ -259,7 +273,7 @@ function startgpgagent
 function stopgpgagent
 {
   if [[ -n "${GPGAGENTPID}" ]]; then
-kill ${GPGAGENTPID}
+kill "${GPGAGENTPID}"
   fi
 }
 
@@ -273,7 +287,7 @@ function usage
   echo "--mvncache=[path]   Path to the maven cache to use"
   echo "--nativeAlso build the native components"
   echo "--rc-label=[label]  Add this label to the builds"
-  echo "--sign  Use .gnupg dir to sign the jars"
+  echo "--sign  Use .gnupg dir to sign the artifacts and jars"
   echo "--version=[version] Use an alternative version string"
 }
 
@@ -330,6 +344,16 @@ function option_parse
 SIGN=false
   fi
 
+  if [[ "${SIGN}" = true ]]; then
+if [[ -n "${GPG_AGENT_INFO}" ]]; then
+  echo "NOTE: Using existing gpg-agent. If the default-cache-ttl"
+  echo "is set to less than ~20 mins, maven commands will fail."
+elif [[ -z "${GPGAGENT}" ]]; then
+  hadoop_error "ERROR: No gpg-agent. Disabling signing capability."
+  SIGN=false
+fi
+  fi
+
   DOCKERCMD=$(command -v docker)
   if [[ "${DOCKER}" = true && -z "${DOCKERCMD}" ]]; then
   hadoop_error "ERROR: docker binary not found. Disabling docker mode."
@@ -439,6 +463,11 @@ function dockermode
 # make sure we put some space between, just in case last
 # line isn't an empty line or whatever
 printf "\n\n"
+
+# force a new image for every run to make it easier to remove later
+echo "LABEL org.apache.hadoop.create-release=\"cr-${RANDOM}\""
+
+# setup ownerships, etc
 echo "RUN groupadd --non-unique -g ${group_id} ${user_name}"
 echo "RUN useradd -g ${group_id} -u ${user_id} -m ${user_name}"
 echo "RUN chown -R ${user_name} /home/${user_name}"
@@ -490,19 +519,27 @@ function makearelease
 
   big_console_header "Maven Build and Install"
 
+  if [[ "${SIGN}" = true ]]; then
+signflags=("-Psign" "-Dgpg.useagent=true" -Dgpg.executable="${GPG}")
+  fi
+
   # Create SRC and BIN tarballs for release,
-  # Using 'install’ goal instead of 'package' so artifacts are available
-  # in the Maven local cache for the site generation
-  #
   # shellcheck disable=SC2046
   run_and_redirect