adoroszlai commented on code in PR #5391:
URL: https://github.com/apache/ozone/pull/5391#discussion_r1468509251


##########
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestGetClusterTreeInformation.java:
##########
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.hdds.scm.net.InnerNode;
+import org.apache.hadoop.hdds.scm.node.NodeManager;
+import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.hdds.scm.proxy.SCMBlockLocationFailoverProxyProvider;
+import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.Assertions;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import java.io.IOException;
+import java.util.concurrent.TimeoutException;
+
+/**
+ *
+ * This class is to test the serialization/deserialization of cluster tree
+ * information from SCM.
+ */
+public class TestGetClusterTreeInformation {
+
+  public static final Logger LOG =
+      LoggerFactory.getLogger(TestGetClusterTreeInformation.class);
+  private static int numOfDatanodes = 3;
+  private static MiniOzoneCluster cluster;
+  private static OzoneConfiguration conf;
+  private static StorageContainerManager scm;
+  private static NodeManager nodeManager;
+  private static ClassLoader classLoader =
+      Thread.currentThread().getContextClassLoader();
+
+  @BeforeAll
+  public static void init() throws IOException, TimeoutException,
+      InterruptedException {
+    conf = new OzoneConfiguration();
+    cluster = MiniOzoneCluster.newBuilder(conf)
+        .setNumDatanodes(numOfDatanodes)
+        .setNumOfOzoneManagers(3)
+        .setNumOfStorageContainerManagers(3)
+        .build();
+    cluster.waitForClusterToBeReady();
+    scm = cluster.getStorageContainerManager();
+  }
+
+  @AfterAll
+  public static void shutdown() {
+    if (cluster != null) {
+      cluster.shutdown();
+    }
+  }
+
+  @Test
+  public void testGetClusterTreeInformation() throws IOException {
+    SCMBlockLocationFailoverProxyProvider failoverProxyProvider =
+        new SCMBlockLocationFailoverProxyProvider(conf);
+    failoverProxyProvider.changeCurrentProxy(scm.getSCMNodeId());
+    ScmBlockLocationProtocolClientSideTranslatorPB scmBlockLocationClient =
+        new ScmBlockLocationProtocolClientSideTranslatorPB(
+            failoverProxyProvider);
+
+    InnerNode expectedInnerNode = scm.getClusterMap().getClusterTree();
+    InnerNode actualInnerNode = scmBlockLocationClient.getClusterTree();
+    Assertions.assertEquals(expectedInnerNode, actualInnerNode);

Review Comment:
   nit: please import `assertEquals`



##########
hadoop-hdds/common/pom.xml:
##########
@@ -206,6 +206,10 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd";>
       <artifactId>junit-platform-launcher</artifactId>
       <scope>test</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.ozone</groupId>
+      <artifactId>hdds-interface-server</artifactId>
+    </dependency>

Review Comment:
   Adding dependency in `hdds-common` on the server-only interface brings it to 
the client-side, which makes the separation of `hdds-interface-*` useless.



##########
hadoop-hdds/interface-server/src/main/proto/ScmServerProtocol.proto:
##########
@@ -230,6 +233,60 @@ message SortDatanodesResponseProto{
   repeated DatanodeDetailsProto node = 1;
 }
 
+message NodeImpl {
+  required string name = 1;
+  required string location = 2;
+  required uint32 cost = 3;
+}
+
+message CommandCount {
+  required SCMCommandProto.Type command = 1;
+  required uint32 count = 2;
+}
+
+message NodeStatus {
+  required NodeOperationalState operationalState = 1;
+  required NodeState health = 2;
+  required uint64 opStateExpiryEpochSeconds = 3;
+}
+
+message DatanodeInfo {
+  required DatanodeDetailsProto datanodeDetails = 1;
+  required uint64 lastHeartbeatTime = 2;
+  required uint64 lastStatsUpdatedTime = 3;
+  required uint32 failedVolumeCount = 4;
+  repeated StorageReportProto storageReportProto = 5;
+  repeated MetadataStorageReportProto metadataStorageReportProto = 6;
+  required LayoutVersionProto layoutVersionProto = 7;
+  repeated CommandCount commandCount = 8;
+  required NodeStatus nodeStatus = 9;
+}
+
+message NodeType {
+  optional DatanodeDetailsProto datanodeDetails = 1;
+  optional DatanodeInfo datanodeInfo = 2;

Review Comment:
   I think the question was intended to highlight that `NodeType` has 
`DatanodeDetailsProto` twice: one directly, one inside `DatanodeInfo`.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to