devmadhuu commented on code in PR #6376:
URL: https://github.com/apache/ozone/pull/6376#discussion_r1601772094


##########
hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/api/NodeEndpoint.java:
##########
@@ -325,4 +340,111 @@ private void checkContainers(DatanodeDetails nodeByUuid, 
AtomicBoolean isContain
           }
         });
   }
+
+  /**
+   * This GET API provides the information of all datanodes for which 
decommissioning is initiated.
+   * @return the wrapped  Response output
+   */
+  @GET
+  @Path("/decommission/info")
+  public Response getDatanodesDecommissionInfo() {
+    try {
+      return getDecommissionStatusResponse(null, null);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  /**
+   * This GET API provides the information of a specific datanode for which 
decommissioning is initiated.
+   * API accepts both uuid or ipAddress, uuid will be given preference if both 
provided.
+   * @return the wrapped  Response output
+   */
+  @GET
+  @Path("/decommission/info/datanode")
+  public Response getDecommissionInfoForDatanode(@QueryParam("uuid") String 
uuid,
+                                                 @QueryParam("ipAddress") 
String ipAddress) {
+    if (StringUtils.isEmpty(uuid)) {
+      Preconditions.checkNotNull(ipAddress, "Either uuid or ipAddress of a 
datanode should be provided !!!");
+      Preconditions.checkArgument(!ipAddress.isEmpty(),
+          "Either uuid or ipAddress of a datanode should be provided !!!");
+    }
+    try {
+      return getDecommissionStatusResponse(uuid, ipAddress);
+    } catch (IOException e) {
+      throw new RuntimeException(e);
+    }
+  }
+
+  private Response getDecommissionStatusResponse(String uuid, String 
ipAddress) throws IOException {
+    Response.ResponseBuilder builder = Response.status(Response.Status.OK);
+    Map<String, Object> responseMap = new HashMap<>();
+    Stream<HddsProtos.Node> allNodes = scmClient.queryNode(DECOMMISSIONING,
+        null, HddsProtos.QueryScope.CLUSTER, "", 
ClientVersion.CURRENT_VERSION).stream();
+    List<HddsProtos.Node> decommissioningNodes = 
HddsUtils.getDecommissioningNodesList(allNodes, uuid, ipAddress);
+    String metricsJson = 
scmClient.getMetrics("Hadoop:service=StorageContainerManager,name=NodeDecommissionMetrics");
+    int numDecomNodes = -1;
+    JsonNode jsonNode = null;
+    if (metricsJson != null) {
+      jsonNode = HddsUtils.getBeansJsonNode(metricsJson);
+      numDecomNodes = HddsUtils.getNumDecomNodes(jsonNode);
+    }
+    List<Map<String, Object>> dnDecommissionInfo =
+        getDecommissioningNodesDetails(decommissioningNodes, jsonNode, 
numDecomNodes);
+    try {
+      responseMap.put("DatanodesDecommissionInfo", dnDecommissionInfo);
+      builder.entity(responseMap);
+      return builder.build();
+    } catch (Exception exception) {
+      LOG.error("Unexpected Error: {}", exception);
+      throw new WebApplicationException(exception, 
Response.Status.INTERNAL_SERVER_ERROR);
+    }
+  }
+
+  private List<Map<String, Object>> 
getDecommissioningNodesDetails(List<HddsProtos.Node> decommissioningNodes,
+                                                                   JsonNode 
jsonNode,
+                                                                   int 
numDecomNodes) throws IOException {
+    List<Map<String, Object>> decommissioningNodesDetails = new ArrayList<>();
+
+    for (HddsProtos.Node node : decommissioningNodes) {
+      DatanodeDetails datanode = DatanodeDetails.getFromProtoBuf(
+          node.getNodeID());
+      Map<String, Object> datanodeMap = new LinkedHashMap<>();
+      datanodeMap.put("datanodeDetails", datanode);
+      datanodeMap.put("metrics", getCounts(datanode, jsonNode, numDecomNodes));
+      datanodeMap.put("containers", getContainers(datanode));
+      decommissioningNodesDetails.add(datanodeMap);
+    }
+    return decommissioningNodesDetails;
+  }
+
+  private Map<String, Object> getCounts(DatanodeDetails datanode, JsonNode 
counts, int numDecomNodes) {
+    Map<String, Object> countsMap = new LinkedHashMap<>();
+    String errMsg = getErrorMessage() + datanode.getHostName();
+    try {
+      countsMap = HddsUtils.getCountsMap(datanode, counts, numDecomNodes, 
countsMap, errMsg);
+      if (countsMap != null) {
+        return countsMap;
+      }
+      LOG.error(errMsg);

Review Comment:
   > is there any error message for success case?
   No, this is not the success case. For the success case, the result is 
returned from line 427 itself.
   



##########
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java:
##########
@@ -870,4 +881,68 @@ public static HddsProtos.UUID toProtobuf(UUID uuid) {
         ? Thread.currentThread().getStackTrace()
         : null;
   }
+
+
+
+  public static List<HddsProtos.Node> 
getDecommissioningNodesList(Stream<HddsProtos.Node> allNodes,

Review Comment:
   Ok done.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to