This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new c1d5b4f390 HDDS-12718. Use NodeStateMap in MockNodeManager instead of test-specific Node2ContainerMap (#8179)
c1d5b4f390 is described below
commit c1d5b4f3904d0caae586a35e255e14772a5eacf1
Author: Tsz-Wo Nicholas Sze <[email protected]>
AuthorDate: Fri Mar 28 04:05:42 2025 -0700
HDDS-12718. Use NodeStateMap in MockNodeManager instead of test-specific Node2ContainerMap (#8179)
---
.../hadoop/hdds/scm/container/MockNodeManager.java | 47 ++--
.../hdds/scm/container/Node2ContainerMap.java | 165 -----------
.../hdds/scm/container/TestNode2ContainerMap.java | 308 ---------------------
3 files changed, 16 insertions(+), 504 deletions(-)
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
index 73186cad06..c3004983e3 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
@@ -23,7 +23,6 @@
import java.io.IOException;
import java.util.ArrayList;
-import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
@@ -61,7 +60,9 @@
import org.apache.hadoop.hdds.scm.node.NodeManager;
import org.apache.hadoop.hdds.scm.node.NodeStatus;
import org.apache.hadoop.hdds.scm.node.states.Node2PipelineMap;
+import org.apache.hadoop.hdds.scm.node.states.NodeAlreadyExistsException;
import org.apache.hadoop.hdds.scm.node.states.NodeNotFoundException;
+import org.apache.hadoop.hdds.scm.node.states.NodeStateMap;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.server.events.EventPublisher;
@@ -104,7 +105,7 @@ public class MockNodeManager implements NodeManager {
private final SCMNodeStat aggregateStat;
private final Map<UUID, List<SCMCommand<?>>> commandMap;
private Node2PipelineMap node2PipelineMap;
- private final Node2ContainerMap node2ContainerMap;
+ private final NodeStateMap node2ContainerMap;
private NetworkTopology clusterMap;
private ConcurrentMap<String, Set<String>> dnsToUuidMap;
private int numHealthyDisksPerDatanode;
@@ -117,7 +118,7 @@ public class MockNodeManager implements NodeManager {
this.deadNodes = new LinkedList<>();
this.nodeMetricMap = new HashMap<>();
this.node2PipelineMap = new Node2PipelineMap();
- this.node2ContainerMap = new Node2ContainerMap();
+ this.node2ContainerMap = new NodeStateMap();
this.dnsToUuidMap = new ConcurrentHashMap<>();
this.aggregateStat = new SCMNodeStat();
this.clusterMap = new NetworkTopologyImpl(new OzoneConfiguration());
@@ -276,9 +277,8 @@ public List<DatanodeDetails> getNodes(
HddsTestUtils.createMetadataStorageReport(
"/metadata1-" + di.getUuidString(), capacity, used,
remaining, null);
- di.updateStorageReports(new ArrayList<>(Arrays.asList(storage1)));
- di.updateMetaDataStorageReports(
- new ArrayList<>(Arrays.asList(metaStorage1)));
+ di.updateStorageReports(Collections.singletonList(storage1));
+ di.updateMetaDataStorageReports(Collections.singletonList(metaStorage1));
healthyNodesWithInfo.add(di);
}
@@ -511,25 +511,13 @@ public void removePipeline(Pipeline pipeline) {
public void addContainer(DatanodeDetails dd,
ContainerID containerId)
throws NodeNotFoundException {
- try {
- Set<ContainerID> set = node2ContainerMap.getContainers(dd.getUuid());
- set.add(containerId);
- node2ContainerMap.setContainersForDatanode(dd.getUuid(), set);
- } catch (SCMException e) {
- e.printStackTrace();
- }
+ node2ContainerMap.getContainers(dd.getID()).add(containerId);
}
@Override
public void removeContainer(DatanodeDetails dd,
- ContainerID containerId) {
- try {
- Set<ContainerID> set = node2ContainerMap.getContainers(dd.getUuid());
- set.remove(containerId);
- node2ContainerMap.setContainersForDatanode(dd.getUuid(), set);
- } catch (SCMException e) {
- e.printStackTrace();
- }
+ ContainerID containerId) throws NodeNotFoundException {
+ node2ContainerMap.getContainers(dd.getID()).remove(containerId);
}
@Override
@@ -637,11 +625,7 @@ public Map<SCMCommandProto.Type, Integer> getTotalDatanodeCommandCounts(
@Override
public void setContainers(DatanodeDetails uuid, Set<ContainerID> containerIds)
throws NodeNotFoundException {
- try {
- node2ContainerMap.setContainersForDatanode(uuid.getUuid(), containerIds);
- } catch (SCMException e) {
- throw new NodeNotFoundException(uuid.getID());
- }
+ node2ContainerMap.setContainers(uuid.getID(), containerIds);
}
/**
@@ -650,8 +634,8 @@ public void setContainers(DatanodeDetails uuid, Set<ContainerID> containerIds)
* @return - set of containerIDs
*/
@Override
- public Set<ContainerID> getContainers(DatanodeDetails uuid) {
- return node2ContainerMap.getContainers(uuid.getUuid());
+ public Set<ContainerID> getContainers(DatanodeDetails uuid) throws NodeNotFoundException {
+ return node2ContainerMap.getContainers(uuid.getID());
}
// Returns the number of commands that is queued to this node manager.
@@ -721,15 +705,16 @@ public RegisteredCommand register(DatanodeDetails datanodeDetails,
PipelineReportsProto pipelineReportsProto,
LayoutVersionProto layoutInfo) {
try {
- node2ContainerMap.insertNewDatanode(datanodeDetails.getUuid(),
- Collections.emptySet());
+ node2ContainerMap.addNode(datanodeDetails,
+ NodeStatus.inServiceHealthy(),
+ layoutInfo);
addEntryTodnsToUuidMap(datanodeDetails.getIpAddress(),
datanodeDetails.getUuidString());
if (clusterMap != null) {
datanodeDetails.setNetworkName(datanodeDetails.getUuidString());
clusterMap.add(datanodeDetails);
}
- } catch (SCMException e) {
+ } catch (NodeAlreadyExistsException e) {
e.printStackTrace();
}
return null;
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java
deleted file mode 100644
index d8d18536b6..0000000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/Node2ContainerMap.java
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.DUPLICATE_DATANODE;
-import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes.NO_SUCH_DATANODE;
-
-import jakarta.annotation.Nonnull;
-import java.util.Collections;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.states.ReportResult;
-
-/**
- * This data structure maintains the list of containers that is on a datanode.
- * This information is built from the DN container reports.
- */
-class Node2ContainerMap {
- private final Map<UUID, Set<ContainerID>> dn2ContainerMap = new ConcurrentHashMap<>();
-
-
- /**
- * Constructs a Node2ContainerMap Object.
- */
- Node2ContainerMap() {
- super();
- }
-
- /**
- * Returns null if there no containers associated with this datanode ID.
- *
- * @param datanode - UUID
- * @return Set of containers or Null.
- */
- public @Nonnull Set<ContainerID> getContainers(@Nonnull UUID datanode) {
- final Set<ContainerID> s = dn2ContainerMap.get(datanode);
- return s != null ? new HashSet<>(s) : Collections.emptySet();
- }
-
- /**
- * Returns true if this a datanode that is already tracked by
- * Node2ContainerMap.
- *
- * @param datanodeID - UUID of the Datanode.
- * @return True if this is tracked, false if this map does not know about it.
- */
- public boolean isKnownDatanode(@Nonnull UUID datanodeID) {
- return dn2ContainerMap.containsKey(datanodeID);
- }
-
- /**
- * Insert a new datanode into Node2Container Map.
- *
- * @param datanodeID -- Datanode UUID
- * @param containerIDs - List of ContainerIDs.
- */
- public void insertNewDatanode(@Nonnull UUID datanodeID, @Nonnull Set<ContainerID> containerIDs)
- throws SCMException {
- if (dn2ContainerMap.putIfAbsent(datanodeID, new HashSet<>(containerIDs)) != null) {
- throw new SCMException("Node already exists in the map", DUPLICATE_DATANODE);
- }
- }
-
- /**
- * Removes datanode Entry from the map.
- *
- * @param datanodeID - Datanode ID.
- */
- public void removeDatanode(@Nonnull UUID datanodeID) {
- dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> null);
- }
-
- public @Nonnull ReportResult.ReportResultBuilder<ContainerID> newBuilder() {
- return new ReportResult.ReportResultBuilder<>();
- }
-
- public @Nonnull ReportResult<ContainerID> processReport(@Nonnull UUID datanodeID, @Nonnull Set<ContainerID> objects) {
- if (!isKnownDatanode(datanodeID)) {
- return newBuilder()
- .setStatus(ReportResult.ReportStatus.NEW_DATANODE_FOUND)
- .setNewEntries(objects)
- .build();
- }
-
- // Conditions like Zero length containers should be handled by removeAll.
- Set<ContainerID> currentSet = dn2ContainerMap.get(datanodeID);
- TreeSet<ContainerID> newObjects = new TreeSet<>(objects);
- newObjects.removeAll(currentSet);
-
- TreeSet<ContainerID> missingObjects = new TreeSet<>(currentSet);
- missingObjects.removeAll(objects);
-
- if (newObjects.isEmpty() && missingObjects.isEmpty()) {
- return newBuilder()
- .setStatus(ReportResult.ReportStatus.ALL_IS_WELL)
- .build();
- }
-
- if (newObjects.isEmpty() && !missingObjects.isEmpty()) {
- return newBuilder()
- .setStatus(ReportResult.ReportStatus.MISSING_ENTRIES)
- .setMissingEntries(missingObjects)
- .build();
- }
-
- if (!newObjects.isEmpty() && missingObjects.isEmpty()) {
- return newBuilder()
- .setStatus(ReportResult.ReportStatus.NEW_ENTRIES_FOUND)
- .setNewEntries(newObjects)
- .build();
- }
-
- if (!newObjects.isEmpty() && !missingObjects.isEmpty()) {
- return newBuilder()
- .setStatus(ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND)
- .setNewEntries(newObjects)
- .setMissingEntries(missingObjects)
- .build();
- }
-
- // default status & Make compiler happy
- return newBuilder()
- .setStatus(ReportResult.ReportStatus.ALL_IS_WELL)
- .build();
- }
-
- /**
- * Updates the Container list of an existing DN.
- *
- * @param datanodeID - UUID of DN.
- * @param containers - Set of Containers tht is present on DN.
- * @throws SCMException - if we don't know about this datanode, for new DN
- * use addDatanodeInContainerMap.
- */
- public void setContainersForDatanode(@Nonnull UUID datanodeID, @Nonnull Set<ContainerID> containers)
- throws SCMException {
- if (dn2ContainerMap.computeIfPresent(datanodeID, (k, v) -> new HashSet<>(containers)) == null) {
- throw new SCMException("No such datanode", NO_SUCH_DATANODE);
- }
- }
-
- public int size() {
- return dn2ContainerMap.size();
- }
-}
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java
deleted file mode 100644
index 8293a1dd6d..0000000000
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestNode2ContainerMap.java
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdds.scm.container;
-
-import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
-
-import java.util.Map;
-import java.util.Random;
-import java.util.Set;
-import java.util.TreeSet;
-import java.util.UUID;
-import java.util.concurrent.ConcurrentHashMap;
-import org.apache.hadoop.hdds.scm.exceptions.SCMException;
-import org.apache.hadoop.hdds.scm.node.states.ReportResult;
-import org.junit.jupiter.api.BeforeEach;
-import org.junit.jupiter.api.Test;
-
-/**
- * Test classes for Node2ContainerMap.
- */
-public class TestNode2ContainerMap {
- private static final int DATANODE_COUNT = 300;
- private static final int CONTAINER_COUNT = 1000;
- private final Map<UUID, TreeSet<ContainerID>> testData = new
- ConcurrentHashMap<>();
-
- private void generateData() {
- for (int dnIndex = 1; dnIndex <= DATANODE_COUNT; dnIndex++) {
- TreeSet<ContainerID> currentSet = new TreeSet<>();
- for (int cnIndex = 1; cnIndex <= CONTAINER_COUNT; cnIndex++) {
- long currentCnIndex = (long) (dnIndex * CONTAINER_COUNT) + cnIndex;
- currentSet.add(ContainerID.valueOf(currentCnIndex));
- }
- testData.put(UUID.randomUUID(), currentSet);
- }
- }
-
- private UUID getFirstKey() {
- return testData.keySet().iterator().next();
- }
-
- @BeforeEach
- public void setUp() throws Exception {
- generateData();
- }
-
- @Test
- public void testIsKnownDatanode() throws SCMException {
- Node2ContainerMap map = new Node2ContainerMap();
- UUID knownNode = getFirstKey();
- UUID unknownNode = UUID.randomUUID();
- Set<ContainerID> containerIDs = testData.get(knownNode);
- map.insertNewDatanode(knownNode, containerIDs);
- assertTrue(map.isKnownDatanode(knownNode),
- "Not able to detect a known node");
- assertFalse(map.isKnownDatanode(unknownNode),
- "Unknown node detected");
- }
-
- @Test
- public void testInsertNewDatanode() throws SCMException {
- Node2ContainerMap map = new Node2ContainerMap();
- UUID knownNode = getFirstKey();
- Set<ContainerID> containerIDs = testData.get(knownNode);
- map.insertNewDatanode(knownNode, containerIDs);
- Set<ContainerID> readSet = map.getContainers(knownNode);
-
- // Assert that all elements are present in the set that we read back from
- // node map.
- Set newSet = new TreeSet((readSet));
- assertTrue(newSet.removeAll(containerIDs));
- assertEquals(0, newSet.size());
-
- Throwable t = assertThrows(SCMException.class,
- () -> map.insertNewDatanode(knownNode, containerIDs));
- assertEquals("Node already exists in the map", t.getMessage());
-
- map.removeDatanode(knownNode);
- map.insertNewDatanode(knownNode, containerIDs);
-
- }
-
- @Test
- public void testProcessReportCheckOneNode() throws SCMException {
- UUID key = getFirstKey();
- Set<ContainerID> values = testData.get(key);
- Node2ContainerMap map = new Node2ContainerMap();
- map.insertNewDatanode(key, values);
- assertTrue(map.isKnownDatanode(key));
- ReportResult result = map.processReport(key, values);
- assertEquals(ReportResult.ReportStatus.ALL_IS_WELL, result.getStatus());
- }
-
- @Test
- public void testUpdateDatanodeMap() throws SCMException {
- UUID datanodeId = getFirstKey();
- Set<ContainerID> values = testData.get(datanodeId);
- Node2ContainerMap map = new Node2ContainerMap();
- map.insertNewDatanode(datanodeId, values);
- assertTrue(map.isKnownDatanode(datanodeId));
- assertEquals(CONTAINER_COUNT,
- map.getContainers(datanodeId).size());
-
- //remove one container
- values.remove(values.iterator().next());
- assertEquals(CONTAINER_COUNT - 1, values.size());
- assertEquals(CONTAINER_COUNT, map.getContainers(datanodeId).size());
-
- map.setContainersForDatanode(datanodeId, values);
-
- assertEquals(values.size(), map.getContainers(datanodeId).size());
- assertEquals(values, map.getContainers(datanodeId));
- }
-
- @Test
- public void testProcessReportInsertAll() throws SCMException {
- Node2ContainerMap map = new Node2ContainerMap();
-
- for (Map.Entry<UUID, TreeSet<ContainerID>> keyEntry : testData.entrySet()) {
- map.insertNewDatanode(keyEntry.getKey(), keyEntry.getValue());
- }
- // Assert all Keys are known datanodes.
- for (UUID key : testData.keySet()) {
- assertTrue(map.isKnownDatanode(key));
- }
- }
-
- /*
- For ProcessReport we have to test the following scenarios.
-
- 1. New Datanode - A new datanode appears and we have to add that to the
- SCM's Node2Container Map.
-
- 2. New Container - A Datanode exists, but a new container is added to that
- DN. We need to detect that and return a list of added containers.
-
- 3. Missing Container - A Datanode exists, but one of the expected container
- on that datanode is missing. We need to detect that.
-
- 4. We get a container report that has both the missing and new containers.
- We need to return separate lists for these.
- */
-
- /**
- * Assert that we are able to detect the addition of a new datanode.
- *
- * @throws SCMException
- */
- @Test
- public void testProcessReportDetectNewDataNode() {
- Node2ContainerMap map = new Node2ContainerMap();
- // If we attempt to process a node that is not present in the map,
- // we get a result back that says, NEW_NODE_FOUND.
- UUID key = getFirstKey();
- TreeSet<ContainerID> values = testData.get(key);
- ReportResult result = map.processReport(key, values);
- assertEquals(ReportResult.ReportStatus.NEW_DATANODE_FOUND, result.getStatus());
- assertEquals(result.getNewEntries().size(), values.size());
- }
-
- /**
- * This test asserts that processReport is able to detect new containers
- * when it is added to a datanode. For that we populate the DN with a list
- * of containerIDs and then add few more containers and make sure that we
- * are able to detect them.
- *
- * @throws SCMException
- */
- @Test
- public void testProcessReportDetectNewContainers() throws SCMException {
- Node2ContainerMap map = new Node2ContainerMap();
- UUID key = getFirstKey();
- TreeSet<ContainerID> values = testData.get(key);
- map.insertNewDatanode(key, values);
-
- final int newCount = 100;
- ContainerID last = values.last();
- TreeSet<ContainerID> addedContainers = new TreeSet<>();
- for (int x = 1; x <= newCount; x++) {
- long cTemp = last.getId() + x;
- addedContainers.add(ContainerID.valueOf(cTemp));
- }
-
- // This set is the super set of existing containers and new containers.
- TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
- newContainersSet.addAll(addedContainers);
-
- ReportResult result = map.processReport(key, newContainersSet);
-
- //Assert that expected size of missing container is same as addedContainers
- assertEquals(ReportResult.ReportStatus.NEW_ENTRIES_FOUND, result.getStatus());
-
- assertEquals(addedContainers.size(), result.getNewEntries().size());
-
- // Assert that the Container IDs are the same as we added new.
- assertTrue(result.getNewEntries().removeAll(addedContainers),
- "All objects are not removed.");
- }
-
- /**
- * This test asserts that processReport is able to detect missing containers
- * if they are misssing from a list.
- *
- * @throws SCMException
- */
- @Test
- public void testProcessReportDetectMissingContainers() throws SCMException {
- Node2ContainerMap map = new Node2ContainerMap();
- UUID key = getFirstKey();
- TreeSet<ContainerID> values = testData.get(key);
- map.insertNewDatanode(key, values);
-
- final int removeCount = 100;
- Random r = new Random();
-
- ContainerID first = values.first();
- TreeSet<ContainerID> removedContainers = new TreeSet<>();
-
- // Pick a random container to remove it is ok to collide no issues.
- for (int x = 0; x < removeCount; x++) {
- int startBase = (int) first.getId();
- long cTemp = r.nextInt(values.size());
- removedContainers.add(ContainerID.valueOf(cTemp + startBase));
- }
-
- // This set is a new set with some containers removed.
- TreeSet<ContainerID> newContainersSet = new TreeSet<>(values);
- newContainersSet.removeAll(removedContainers);
-
- ReportResult result = map.processReport(key, newContainersSet);
-
-
- //Assert that expected size of missing container is same as addedContainers
- assertEquals(ReportResult.ReportStatus.MISSING_ENTRIES, result.getStatus());
- assertEquals(removedContainers.size(), result.getMissingEntries().size());
-
- // Assert that the Container IDs are the same as we added new.
- assertTrue(
- result.getMissingEntries().removeAll(removedContainers),
- "All missing containers not found.");
- }
-
- @Test
- public void testProcessReportDetectNewAndMissingContainers() throws
- SCMException {
- Node2ContainerMap map = new Node2ContainerMap();
- UUID key = getFirstKey();
- TreeSet<ContainerID> values = testData.get(key);
- map.insertNewDatanode(key, values);
-
- Set<ContainerID> insertedSet = new TreeSet<>();
- // Insert nodes from 1..30
- for (int x = 1; x <= 30; x++) {
- insertedSet.add(ContainerID.valueOf(x));
- }
-
-
- final int removeCount = 100;
- Random r = new Random();
-
- ContainerID first = values.first();
- TreeSet<ContainerID> removedContainers = new TreeSet<>();
-
- // Pick a random container to remove it is ok to collide no issues.
- for (int x = 0; x < removeCount; x++) {
- int startBase = (int) first.getId();
- long cTemp = r.nextInt(values.size());
- removedContainers.add(ContainerID.valueOf(cTemp + startBase));
- }
-
- Set<ContainerID> newSet = new TreeSet<>(values);
- newSet.addAll(insertedSet);
- newSet.removeAll(removedContainers);
-
- ReportResult result = map.processReport(key, newSet);
-
-
- assertEquals(ReportResult.ReportStatus.MISSING_AND_NEW_ENTRIES_FOUND, result.getStatus());
- assertEquals(removedContainers.size(), result.getMissingEntries().size());
-
-
- // Assert that the Container IDs are the same as we added new.
- assertTrue(result.getMissingEntries().removeAll(removedContainers), "All
missing containers not found.");
-
- assertEquals(insertedSet.size(), result.getNewEntries().size());
-
- // Assert that the Container IDs are the same as we added new.
- assertTrue(result.getNewEntries().removeAll(insertedSet), "All inserted
containers are not found.");
- }
-}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]