This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 9fb61ffa3b HDDS-10413. Recon - UnsupportedOperationException while
merging Incremental Container Reports. (#6260)
9fb61ffa3b is described below
commit 9fb61ffa3b79a7cdcb2d33da81c46b1fa55d5445
Author: Devesh Kumar Singh <[email protected]>
AuthorDate: Wed Feb 28 02:35:59 2024 +0530
HDDS-10413. Recon - UnsupportedOperationException while merging Incremental
Container Reports. (#6260)
Co-authored-by: deveshsingh <[email protected]>
---
.../scm/server/SCMDatanodeHeartbeatDispatcher.java | 14 +++++++----
.../apache/hadoop/ozone/recon/TestReconUtils.java | 25 ++++++++++++++++++++
...TestReconIncrementalContainerReportHandler.java | 27 ++++++++++++++++++++++
3 files changed, 62 insertions(+), 4 deletions(-)
diff --git
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
index b6dc6f599b..484a1e6f0f 100644
---
a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
+++
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeHeartbeatDispatcher.java
@@ -213,7 +213,7 @@ public final class SCMDatanodeHeartbeatDispatcher {
private final DatanodeDetails datanodeDetails;
- private final T report;
+ private T report;
public ReportFromDatanode(DatanodeDetails datanodeDetails, T report) {
this.datanodeDetails = datanodeDetails;
@@ -227,6 +227,10 @@ public final class SCMDatanodeHeartbeatDispatcher {
public T getReport() {
return report;
}
+
+ public void setReport(T report) {
+ this.report = report;
+ }
}
/**
@@ -381,9 +385,11 @@ public final class SCMDatanodeHeartbeatDispatcher {
@Override
public void mergeReport(ContainerReport nextReport) {
if (nextReport.getType() == ContainerReportType.ICR) {
- getReport().getReportList().addAll(
- ((ReportFromDatanode<IncrementalContainerReportProto>) nextReport)
- .getReport().getReportList());
+      // To update the existing report list, we need to create a builder and
+      // then merge the new reports into the existing report list.
+      IncrementalContainerReportProto reportProto =
+          getReport().toBuilder().addAllReport(
+              ((ReportFromDatanode<IncrementalContainerReportProto>)
+                  nextReport).getReport().getReportList()).build();
+      setReport(reportProto);
}
}
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
index f49826e67d..d5962c0c40 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconUtils.java
@@ -45,7 +45,11 @@ import java.net.URL;
import org.apache.commons.lang3.RandomUtils;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.hdds.client.RatisReplicationConfig;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
+import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -53,6 +57,7 @@ import org.junit.jupiter.api.io.TempDir;
* Test Recon Utility methods.
*/
public class TestReconUtils {
+ private static PipelineID randomPipelineID = PipelineID.randomId();
@TempDir
private Path temporaryFolder;
@@ -234,4 +239,24 @@ public class TestReconUtils {
}
return index;
}
+
+ private static ContainerInfo.Builder getDefaultContainerInfoBuilder(
+ final HddsProtos.LifeCycleState state) {
+ return new ContainerInfo.Builder()
+ .setContainerID(RandomUtils.nextLong())
+ .setReplicationConfig(
+ RatisReplicationConfig
+ .getInstance(HddsProtos.ReplicationFactor.THREE))
+ .setState(state)
+ .setSequenceId(10000L)
+ .setOwner("TEST");
+ }
+
+
+ public static ContainerInfo getContainer(
+ final HddsProtos.LifeCycleState state) {
+ return getDefaultContainerInfoBuilder(state)
+ .setPipelineID(randomPipelineID)
+ .build();
+ }
}
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index efde79f9ba..3c572aa8e0 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolPro
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.IncrementalContainerReportProto;
import org.apache.hadoop.hdds.scm.container.ContainerID;
+import org.apache.hadoop.hdds.scm.container.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.ha.SCMContext;
import org.apache.hadoop.hdds.scm.net.NetworkTopology;
@@ -55,6 +56,7 @@ import org.apache.hadoop.hdds.scm.server.SCMStorageConfig;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
+import org.apache.hadoop.ozone.recon.TestReconUtils;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.io.TempDir;
@@ -165,6 +167,31 @@ public class TestReconIncrementalContainerReportHandler
}
}
+ @Test
+ public void testMergeMultipleICRs() {
+    final ContainerInfo container =
+        TestReconUtils.getContainer(LifeCycleState.OPEN);
+ final DatanodeDetails datanodeOne = randomDatanodeDetails();
+ final IncrementalContainerReportProto containerReport =
+ getIncrementalContainerReportProto(container.containerID(),
+ ContainerReplicaProto.State.CLOSED,
+ datanodeOne.getUuidString());
+ final IncrementalContainerReportFromDatanode icrFromDatanode1 =
+ new IncrementalContainerReportFromDatanode(
+ datanodeOne, containerReport);
+ final IncrementalContainerReportFromDatanode icrFromDatanode2 =
+ new IncrementalContainerReportFromDatanode(
+ datanodeOne, containerReport);
+ assertEquals(1, icrFromDatanode1.getReport().getReportList().size());
+ icrFromDatanode1.mergeReport(icrFromDatanode2);
+ assertEquals(2, icrFromDatanode1.getReport().getReportList().size());
+
+ final IncrementalContainerReportFromDatanode icrFromDatanode3 =
+ new IncrementalContainerReportFromDatanode(
+ datanodeOne, containerReport);
+ icrFromDatanode1.mergeReport(icrFromDatanode3);
+ assertEquals(3, icrFromDatanode1.getReport().getReportList().size());
+ }
+
private LifeCycleState getContainerStateFromReplicaState(
State state) {
switch (state) {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]