HDFS-9645. DiskBalancer: Add Query RPC. (Contributed by Anu Engineer)
Project: http://git-wip-us.apache.org/repos/asf/hadoop/repo Commit: http://git-wip-us.apache.org/repos/asf/hadoop/commit/5cc54068 Tree: http://git-wip-us.apache.org/repos/asf/hadoop/tree/5cc54068 Diff: http://git-wip-us.apache.org/repos/asf/hadoop/diff/5cc54068 Branch: refs/heads/HDFS-1312 Commit: 5cc540683162ca3d0992597fc41d5315fef4d67d Parents: 637acb6 Author: Arpit Agarwal <[email protected]> Authored: Wed Jan 20 10:47:30 2016 -0800 Committer: Arpit Agarwal <[email protected]> Committed: Wed Jan 20 10:47:30 2016 -0800 ---------------------------------------------------------------------- .../hdfs/protocol/ClientDatanodeProtocol.java | 6 ++ .../ClientDatanodeProtocolTranslatorPB.java | 22 +++++ .../hadoop/hdfs/server/datanode/WorkStatus.java | 85 ++++++++++++++++++++ .../src/main/proto/ClientDatanodeProtocol.proto | 26 ++++++ .../hadoop-hdfs/HDFS-1312_CHANGES.txt | 2 + ...tDatanodeProtocolServerSideTranslatorPB.java | 23 ++++++ .../hadoop/hdfs/server/datanode/DataNode.java | 5 ++ .../diskbalancer/TestDiskBalancerRPC.java | 48 ++++++++++- 8 files changed, 214 insertions(+), 3 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc54068/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java index 2ad3c69..e2f2491 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java @@ -30,6 +30,7 @@ import 
org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector; import org.apache.hadoop.security.KerberosInfo; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenInfo; +import org.apache.hadoop.hdfs.server.datanode.WorkStatus; /** An client-datanode protocol for block recovery */ @@ -170,4 +171,9 @@ public interface ClientDatanodeProtocol { */ void cancelDiskBalancePlan(String planID) throws IOException; + + /** + * Gets the status of an executing diskbalancer Plan. + */ + WorkStatus queryDiskBalancerPlan() throws IOException; } http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc54068/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java index 2ce654a..2331fa8 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java @@ -53,7 +53,10 @@ import org.apache.hadoop.hdfs.protocol.proto.ReconfigurationProtocolProtos.Start import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.TriggerBlockReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto; +import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto; import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier; +import org.apache.hadoop.hdfs.server.datanode.WorkStatus; import org.apache.hadoop.ipc.ProtobufHelper; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.ProtocolMetaInterface; @@ -365,4 +368,23 @@ public class ClientDatanodeProtocolTranslatorPB implements throw ProtobufHelper.getRemoteException(e); } } + + /** + * Gets the status of an executing diskbalancer Plan. + */ + @Override + public WorkStatus queryDiskBalancerPlan() throws IOException { + try { + QueryPlanStatusRequestProto request = + QueryPlanStatusRequestProto.newBuilder().build(); + QueryPlanStatusResponseProto response = + rpcProxy.queryDiskBalancerPlan(NULL_CONTROLLER, request); + return new WorkStatus(response.hasResult() ? response.getResult() : 0, + response.hasPlanID() ? response.getPlanID() : null, + response.hasStatus() ? response.getStatus() : null, + response.hasCurrentStatus() ? response.getCurrentStatus() : null); + } catch (ServiceException e) { + throw ProtobufHelper.getRemoteException(e); + } + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc54068/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/WorkStatus.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/WorkStatus.java b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/WorkStatus.java new file mode 100644 index 0000000..259a311 --- /dev/null +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/server/datanode/WorkStatus.java @@ -0,0 +1,85 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + */ + +package org.apache.hadoop.hdfs.server.datanode; + +import org.apache.hadoop.classification.InterfaceAudience; + +/** + * Helper class that reports how much work has been done by the node. + */ [email protected] +public class WorkStatus { + private int result; + private String planID; + private String status; + private String currentState; + + /** + * Constructs a workStatus Object. + * + * @param result - int + * @param planID - Plan ID + * @param status - Current Status + * @param currentState - Current State + */ + public WorkStatus(int result, String planID, String status, + String currentState) { + this.result = result; + this.planID = planID; + this.status = status; + this.currentState = currentState; + } + + /** + * Returns result. + * + * @return int + */ + public int getResult() { + return result; + } + + /** + * Returns planID. + * + * @return String + */ + public String getPlanID() { + return planID; + } + + /** + * Returns Status. + * + * @return String + */ + public String getStatus() { + return status; + } + + /** + * Gets current Status. 
+ * + * @return - Json String + */ + public String getCurrentState() { + return currentState; + } +} http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc54068/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto index f7df982..5187fa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientDatanodeProtocol.proto @@ -173,6 +173,26 @@ message CancelPlanRequestProto { message CancelPlanResponseProto { } + +/** + * This message allows a client to query data node to see + * if a disk balancer plan is executing and if so what is + * the status. + */ +message QueryPlanStatusRequestProto { +} + +/** + * This message describes a plan if it is in progress + */ +message QueryPlanStatusResponseProto { + optional uint32 result = 1; + optional string status = 2; + optional string planID = 3; + optional string currentStatus = 4; + +} + /** * Protocol used from client to the Datanode. * See the request and response for details of rpc call. 
@@ -239,4 +259,10 @@ service ClientDatanodeProtocolService { */ rpc cancelDiskBalancerPlan(CancelPlanRequestProto) returns (CancelPlanResponseProto); + + /** + * Gets the status of an executing Plan + */ + rpc queryDiskBalancerPlan(QueryPlanStatusRequestProto) + returns (QueryPlanStatusResponseProto); } http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc54068/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt index 8ceb45b..c6a5554 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/HDFS-1312_CHANGES.txt @@ -19,3 +19,5 @@ HDFS-1312 Change Log HDFS-9595. DiskBalancer: Add cancelPlan RPC. (Anu Engineer via Arpit Agarwal) + HDFS-9645. DiskBalancer: Add Query RPC. (Anu Engineer via Arpit Agarwal) + http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc54068/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java index eacb590..98e3d0b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolServerSideTranslatorPB.java @@ -49,9 +49,12 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.Submit import 
org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.SubmitDiskBalancerPlanResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.CancelPlanResponseProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.ClientDatanodeProtocolProtos.QueryPlanStatusResponseProto; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; +import org.apache.hadoop.hdfs.server.datanode.WorkStatus; /** * Implementation for protobuf service that forwards requests @@ -266,4 +269,24 @@ public class ClientDatanodeProtocolServerSideTranslatorPB implements } } + /** + * Gets the status of an executing Plan. + */ + @Override + public QueryPlanStatusResponseProto queryDiskBalancerPlan( + RpcController controller, QueryPlanStatusRequestProto request) + throws ServiceException { + try { + WorkStatus result = impl.queryDiskBalancerPlan(); + return QueryPlanStatusResponseProto + .newBuilder() + .setResult(result.getResult()) + .setPlanID(result.getPlanID()) + .setStatus(result.getStatus()) + .setCurrentStatus(result.getCurrentState()) + .build(); + } catch (Exception e) { + throw new ServiceException(e); + } + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc54068/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index 4450c04..7e2f378 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -3245,4 +3245,9 @@ public class DataNode extends ReconfigurableBase throw new DiskbalancerException("Not Implemented", 0); } + @Override + public WorkStatus queryDiskBalancerPlan() throws IOException { + checkSuperuserPrivilege(); + throw new DiskbalancerException("Not Implemented", 0); + } } http://git-wip-us.apache.org/repos/asf/hadoop/blob/5cc54068/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java ---------------------------------------------------------------------- diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java index 35d3f91..a127816 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/diskbalancer/TestDiskBalancerRPC.java @@ -35,6 +35,7 @@ import org.junit.Rule; import org.junit.Test; import org.junit.rules.ExpectedException; +import java.io.IOException; import java.net.URI; public class TestDiskBalancerRPC { @@ -43,6 +44,7 @@ public class TestDiskBalancerRPC { private MiniDFSCluster cluster; private Configuration conf; + @Before public void setUp() throws Exception { conf = new HdfsConfiguration(); @@ -113,11 +115,51 @@ public class TestDiskBalancerRPC { // Since submitDiskBalancerPlan is not implemented yet, it throws an // Exception, this will be modified with the actual implementation. 
- thrown.expect(DiskbalancerException.class); - dataNode.submitDiskBalancerPlan(planHash, planVersion, 10, plan.toJson()); - + try { + dataNode.submitDiskBalancerPlan(planHash, planVersion, 10, plan.toJson()); + } catch (DiskbalancerException ex) { + // Let us ignore this for time being. + } thrown.expect(DiskbalancerException.class); dataNode.cancelDiskBalancePlan(planHash); } + + @Test + public void TestQueryTestRpc() throws Exception { + final int dnIndex = 0; + cluster.restartDataNode(dnIndex); + cluster.waitActive(); + ClusterConnector nameNodeConnector = + ConnectorFactory.getCluster(cluster.getFileSystem(0).getUri(), conf); + + DiskBalancerCluster diskBalancerCluster = new DiskBalancerCluster + (nameNodeConnector); + diskBalancerCluster.readClusterInfo(); + Assert.assertEquals(cluster.getDataNodes().size(), + diskBalancerCluster.getNodes().size()); + diskBalancerCluster.setNodesToProcess(diskBalancerCluster.getNodes()); + DiskBalancerDataNode node = diskBalancerCluster.getNodes().get(0); + GreedyPlanner planner = new GreedyPlanner(10.0f, node); + NodePlan plan = new NodePlan(node.getDataNodeName(), node.getDataNodePort + ()); + planner.balanceVolumeSet(node, node.getVolumeSets().get("DISK"), plan); + + final int planVersion = 0; // So far we support only one version. + DataNode dataNode = cluster.getDataNodes().get(dnIndex); + String planHash = DigestUtils.sha512Hex(plan.toJson()); + + // Since submitDiskBalancerPlan is not implemented yet, it throws an + // Exception, this will be modified with the actual implementation. + try { + dataNode.submitDiskBalancerPlan(planHash, planVersion, 10, plan.toJson()); + } catch (DiskbalancerException ex) { + // Let us ignore this for time being. + } + + // TODO : This will be fixed when we have implementation for this + // function in server side. + thrown.expect(DiskbalancerException.class); + dataNode.queryDiskBalancerPlan(); + } }
