yandrey321 commented on code in PR #9258:
URL: https://github.com/apache/ozone/pull/9258#discussion_r2833754231


##########
hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconTasksV2MultiNode.java:
##########
@@ -0,0 +1,222 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.recon;
+
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
+import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_PIPELINE_REPORT_INTERVAL;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+
+import java.time.Duration;
+import java.util.List;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineManager;
+import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.recon.persistence.ContainerHealthSchemaManagerV2.UnhealthyContainerRecordV2;
+import org.apache.hadoop.ozone.recon.scm.ReconContainerManager;
+import org.apache.hadoop.ozone.recon.scm.ReconStorageContainerManagerFacade;
+import org.apache.hadoop.ozone.recon.tasks.ReconTaskConfig;
+import org.apache.ozone.recon.schema.ContainerSchemaDefinitionV2;
+import org.apache.ozone.test.LambdaTestUtils;
+import org.junit.jupiter.api.Test;
+
+/**
+ * Integration tests for ContainerHealthTaskV2 with multi-node clusters.
+ *
+ * These tests are separate from TestReconTasks because they require
+ * different cluster configurations (3 datanodes) and would conflict
+ * with the @BeforeEach/@AfterEach setup in that class.
+ */
+public class TestReconTasksV2MultiNode {
+
+  /**
+   * Test that ContainerHealthTaskV2 can query UNDER_REPLICATED containers.
+   * Steps:
+   * 1. Create a cluster with 3 datanodes
+   * 2. Verify the query mechanism for UNDER_REPLICATED state works
+   *
+   * Note: Creating actual under-replication scenarios in integration tests
+   * requires containers to have data written to them before physical replicas
+   * are created on datanodes. This is complex to set up properly.
+   *
+   * In production, under-replication occurs when:
+   * 1. A datanode goes down or becomes unreachable
+   * 2. A datanode's disk fails
+   * 3. Network partitions occur
+   * 4. Datanodes are decommissioned
+   *
+   * The detection logic is tested end-to-end in:
+   * - TestReconTasks.testContainerHealthTaskV2WithSCMSync() - which proves
+   *   Recon's RM logic works for MISSING containers (similar detection logic)
+   *
+   * Full end-to-end test for UNDER_REPLICATED would require:
+   * 1. Allocate container with RF=3
+   * 2. Write actual data to container (creates physical replicas)
+   * 3. Shut down 1 datanode
+   * 4. Wait for SCM to mark datanode as dead (stale/dead intervals)
+   * 5. Wait for ContainerHealthTaskV2 to run (task interval)
+   * 6. Verify UNDER_REPLICATED state in V2 table with correct replica counts
+   * 7. Restart datanode and verify container becomes healthy
+   */
+  @Test
+  public void testContainerHealthTaskV2UnderReplicated() throws Exception {
+    // Create a cluster with 3 datanodes
+    OzoneConfiguration testConf = new OzoneConfiguration();
+    testConf.set(HDDS_CONTAINER_REPORT_INTERVAL, "5s");
+    testConf.set(HDDS_PIPELINE_REPORT_INTERVAL, "5s");
+
+    ReconTaskConfig taskConfig = testConf.getObject(ReconTaskConfig.class);
+    taskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(10));
+    testConf.setFromObject(taskConfig);
+
+    testConf.set("ozone.scm.stale.node.interval", "6s");
+    testConf.set("ozone.scm.dead.node.interval", "8s");
+
+    ReconService testRecon = new ReconService(testConf);
+    MiniOzoneCluster testCluster = MiniOzoneCluster.newBuilder(testConf)
+        .setNumDatanodes(3)
+        .addService(testRecon)
+        .build();
+
+    try {
+      testCluster.waitForClusterToBeReady();
+      testCluster.waitForPipelineTobeReady(
+          HddsProtos.ReplicationFactor.THREE, 60000);
+
+      ReconStorageContainerManagerFacade reconScm =
+          (ReconStorageContainerManagerFacade)
+              testRecon.getReconServer().getReconStorageContainerManager();
+
+      PipelineManager reconPipelineManager = reconScm.getPipelineManager();
+
+      // Make sure Recon's pipeline state is initialized
+      LambdaTestUtils.await(60000, 5000,
+          () -> (!reconPipelineManager.getPipelines().isEmpty()));
+
+      ReconContainerManager reconContainerManager =
+          (ReconContainerManager) reconScm.getContainerManager();
+
+      // Verify the query mechanism for UNDER_REPLICATED state works
+      List<UnhealthyContainerRecordV2> underReplicatedContainers =
+          reconContainerManager.getContainerSchemaManagerV2()
+              .getUnhealthyContainers(
+                  ContainerSchemaDefinitionV2.UnHealthyContainerStates.UNDER_REPLICATED,
+                  0L, 0L, 1000);
+
+      // Should be empty in normal operation (all replicas healthy)
+      assertEquals(0, underReplicatedContainers.size());
+
+    } finally {
+      if (testCluster != null) {
+        testCluster.shutdown();
+      }
+    }
+  }
+
+  /**
+   * Test that ContainerHealthTaskV2 detects OVER_REPLICATED containers.
+   * Steps:
+   * 1. Create a cluster with 3 datanodes
+   * 2. Allocate a container with replication factor 1
+   * 3. Write data to the container
+   * 4. Manually add the container to additional datanodes to create over-replication
+   * 5. Verify ContainerHealthTaskV2 detects OVER_REPLICATED state in V2 table
+   *
+   * Note: Creating over-replication scenarios is complex in integration tests
+   * as it requires manipulating the container replica state artificially.
+   * This test demonstrates the detection capability when over-replication occurs.
+   */
+  @Test
+  public void testContainerHealthTaskV2OverReplicated() throws Exception {
+    // Create a cluster with 3 datanodes
+    OzoneConfiguration testConf = new OzoneConfiguration();
+    testConf.set(HDDS_CONTAINER_REPORT_INTERVAL, "5s");
+    testConf.set(HDDS_PIPELINE_REPORT_INTERVAL, "5s");
+
+    ReconTaskConfig taskConfig = testConf.getObject(ReconTaskConfig.class);
+    taskConfig.setMissingContainerTaskInterval(Duration.ofSeconds(10));
+    testConf.setFromObject(taskConfig);
+
+    testConf.set("ozone.scm.stale.node.interval", "6s");
+    testConf.set("ozone.scm.dead.node.interval", "8s");
+
+    ReconService testRecon = new ReconService(testConf);
+    MiniOzoneCluster testCluster = MiniOzoneCluster.newBuilder(testConf)
+        .setNumDatanodes(3)
+        .addService(testRecon)
+        .build();
+
+    try {
+      testCluster.waitForClusterToBeReady();
+      testCluster.waitForPipelineTobeReady(
+          HddsProtos.ReplicationFactor.ONE, 60000);
+
+      ReconStorageContainerManagerFacade reconScm =
+          (ReconStorageContainerManagerFacade)
+              testRecon.getReconServer().getReconStorageContainerManager();
+
+      PipelineManager reconPipelineManager = reconScm.getPipelineManager();
+
+      // Make sure Recon's pipeline state is initialized
+      LambdaTestUtils.await(60000, 5000,
+          () -> (!reconPipelineManager.getPipelines().isEmpty()));
+
+      ReconContainerManager reconContainerManager =
+          (ReconContainerManager) reconScm.getContainerManager();
+
+      // Note: Creating over-replication in integration tests is challenging
+      // as it requires artificially adding extra replicas. In production,
+      // over-replication can occur when:
+      // 1. A dead datanode comes back online with old replicas
+      // 2. Replication commands create extra replicas before cleanup
+      // 3. Manual intervention or bugs cause duplicate replicas
+      //
+      // For now, this test verifies the detection mechanism exists.
+      // If over-replication is detected in the future, the V2 table
+      // should contain the record with proper replica counts.
+
+      // The actual over-replication detection would look like this:
+      // LambdaTestUtils.await(120000, 6000, () -> {
+      //   List<UnhealthyContainerRecordV2> overReplicatedContainers =

Review Comment:
   Please remove the commented-out code.
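
   For example, the commented-out block could be replaced with an active check that mirrors the under-replicated test, reusing `reconContainerManager` from earlier in the method. This is only a sketch; it assumes `OVER_REPLICATED` is a member of `ContainerSchemaDefinitionV2.UnHealthyContainerStates` and that, since the test does not actually induce over-replication, the query result is empty:

   ```java
   // Sketch: exercise the OVER_REPLICATED query path instead of leaving
   // commented-out code. Uses the same getUnhealthyContainers API as the
   // under-replicated test above.
   List<UnhealthyContainerRecordV2> overReplicatedContainers =
       reconContainerManager.getContainerSchemaManagerV2()
           .getUnhealthyContainers(
               ContainerSchemaDefinitionV2.UnHealthyContainerStates.OVER_REPLICATED,
               0L, 0L, 1000);

   // No over-replication is induced in this test, so the V2 table should
   // contain no OVER_REPLICATED records.
   assertEquals(0, overReplicatedContainers.size());
   ```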



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
