This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new f21e2a1e0f HDDS-6954. Migrate simple tests in ozone-recon to JUnit5 (#3556)
f21e2a1e0f is described below
commit f21e2a1e0fddd50d8881a5a13d15a30bb44c9bbd
Author: Kaijie Chen <[email protected]>
AuthorDate: Wed Jul 13 04:07:40 2022 +0800
HDDS-6954. Migrate simple tests in ozone-recon to JUnit5 (#3556)
---
.../apache/hadoop/ozone/recon/TestReconCodecs.java | 12 +--
.../hadoop/ozone/recon/api/TestEndpoints.java | 90 +++++++++++-----------
.../ozone/recon/api/TestTaskStatusService.java | 15 ++--
.../ozone/recon/api/filters/TestAdminFilter.java | 22 +++---
.../recon/fsck/TestContainerHealthStatus.java | 12 +--
.../ozone/recon/fsck/TestContainerHealthTask.java | 16 ++--
.../TestContainerHealthTaskRecordGenerator.java | 14 ++--
.../recon/persistence/AbstractReconSqlDBTest.java | 2 +
.../TestReconInternalSchemaDefinition.java | 21 ++---
.../recon/persistence/TestSqlSchemaSetup.java | 6 +-
.../persistence/TestStatsSchemaDefinition.java | 29 +++----
.../TestUtilizationSchemaDefinition.java | 40 +++++-----
.../scm/AbstractReconContainerManagerTest.java | 20 ++---
.../ozone/recon/scm/TestReconContainerManager.java | 33 ++++----
...TestReconIncrementalContainerReportHandler.java | 12 +--
.../recon/scm/TestReconPipelineReportHandler.java | 2 +-
.../TestStorageContainerServiceProviderImpl.java | 12 +--
.../ozone/recon/tasks/TestFileSizeCountTask.java | 10 +--
.../recon/tasks/TestReconTaskControllerImpl.java | 44 +++++------
.../ozone/recon/tasks/TestTableCountTask.java | 10 +--
20 files changed, 208 insertions(+), 214 deletions(-)
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java
index 772c661dcc..6545f08103 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/TestReconCodecs.java
@@ -24,8 +24,8 @@ import org.apache.hadoop.ozone.recon.api.types.ContainerKeyPrefix;
import org.apache.hadoop.ozone.recon.spi.impl.ContainerKeyPrefixCodec;
import org.apache.hadoop.hdds.utils.db.Codec;
import org.apache.hadoop.hdds.utils.db.IntegerCodec;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
/**
* Unit Tests for Codecs used in Recon.
@@ -39,10 +39,10 @@ public class TestReconCodecs {
Codec<ContainerKeyPrefix> codec = new ContainerKeyPrefixCodec();
byte[] persistedFormat = codec.toPersistedFormat(containerKeyPrefix);
- Assert.assertTrue(persistedFormat != null);
+ Assertions.assertTrue(persistedFormat != null);
ContainerKeyPrefix fromPersistedFormat =
codec.fromPersistedFormat(persistedFormat);
- Assert.assertEquals(containerKeyPrefix, fromPersistedFormat);
+ Assertions.assertEquals(containerKeyPrefix, fromPersistedFormat);
}
@Test
@@ -50,9 +50,9 @@ public class TestReconCodecs {
Integer i = 1000;
Codec<Integer> codec = new IntegerCodec();
byte[] persistedFormat = codec.toPersistedFormat(i);
- Assert.assertTrue(persistedFormat != null);
+ Assertions.assertTrue(persistedFormat != null);
Integer fromPersistedFormat =
codec.fromPersistedFormat(persistedFormat);
- Assert.assertEquals(i, fromPersistedFormat);
+ Assertions.assertEquals(i, fromPersistedFormat);
}
}
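The hunks above capture the core of the migration: org.junit.Assert and org.junit.Test are swapped for their JUnit 5 (Jupiter) counterparts while the test bodies stay unchanged. As a rough, self-contained illustration of the target style (hypothetical class, not part of this patch):

    import org.junit.jupiter.api.Assertions;
    import org.junit.jupiter.api.Test;

    // Hypothetical example, not from this commit: the Jupiter equivalents of
    // the JUnit 4 Assert/Test APIs used by the Recon codec tests.
    public class JupiterStyleSketchTest {
      @Test
      public void testRoundTrip() {
        byte[] persisted = new byte[] {1, 2, 3};
        Assertions.assertNotNull(persisted);
        Assertions.assertEquals(3, persisted.length);
      }
    }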
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
index fd01a1128b..9383ca9857 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestEndpoints.java
@@ -83,9 +83,9 @@ import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize;
import org.jooq.Configuration;
import org.jooq.DSLContext;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.ozone.container.upgrade.UpgradeUtils.defaultLayoutVersionProto;
@@ -95,8 +95,8 @@ import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.initializ
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.writeDataToOm;
import static org.apache.hadoop.ozone.recon.spi.impl.PrometheusServiceProviderImpl.PROMETHEUS_INSTANT_QUERY_API;
import static org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
import static org.mockito.ArgumentMatchers.anyString;
@@ -250,7 +250,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
dslContext = getDslContext();
}
- @Before
+ @BeforeEach
public void setUp() throws Exception {
// The following setup runs only once
if (!isSetupDone) {
@@ -359,7 +359,7 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
// Process all events in the event queue
reconScm.getEventQueue().processAll(1000);
} catch (Exception ex) {
- Assert.fail(ex.getMessage());
+ Assertions.fail(ex.getMessage());
}
// Write Data to OM
@@ -402,47 +402,47 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
private void testDatanodeResponse(DatanodeMetadata datanodeMetadata)
throws IOException {
// Check NodeState and NodeOperationalState field existence
- Assert.assertEquals(NodeState.HEALTHY, datanodeMetadata.getState());
- Assert.assertEquals(NodeOperationalState.IN_SERVICE,
+ Assertions.assertEquals(NodeState.HEALTHY, datanodeMetadata.getState());
+ Assertions.assertEquals(NodeOperationalState.IN_SERVICE,
datanodeMetadata.getOperationalState());
String hostname = datanodeMetadata.getHostname();
switch (hostname) {
case HOST1:
- Assert.assertEquals(75000,
+ Assertions.assertEquals(75000,
datanodeMetadata.getDatanodeStorageReport().getCapacity());
- Assert.assertEquals(15400,
+ Assertions.assertEquals(15400,
datanodeMetadata.getDatanodeStorageReport().getRemaining());
- Assert.assertEquals(35000,
+ Assertions.assertEquals(35000,
datanodeMetadata.getDatanodeStorageReport().getUsed());
- Assert.assertEquals(1, datanodeMetadata.getPipelines().size());
- Assert.assertEquals(pipelineId,
+ Assertions.assertEquals(1, datanodeMetadata.getPipelines().size());
+ Assertions.assertEquals(pipelineId,
datanodeMetadata.getPipelines().get(0).getPipelineID().toString());
- Assert.assertEquals(pipeline.getReplicationConfig().getReplication(),
+ Assertions.assertEquals(pipeline.getReplicationConfig().getReplication(),
datanodeMetadata.getPipelines().get(0).getReplicationFactor());
- Assert.assertEquals(pipeline.getType().toString(),
+ Assertions.assertEquals(pipeline.getType().toString(),
datanodeMetadata.getPipelines().get(0).getReplicationType());
- Assert.assertEquals(pipeline.getLeaderNode().getHostName(),
+ Assertions.assertEquals(pipeline.getLeaderNode().getHostName(),
datanodeMetadata.getPipelines().get(0).getLeaderNode());
- Assert.assertEquals(1, datanodeMetadata.getLeaderCount());
+ Assertions.assertEquals(1, datanodeMetadata.getLeaderCount());
break;
case HOST2:
- Assert.assertEquals(130000,
+ Assertions.assertEquals(130000,
datanodeMetadata.getDatanodeStorageReport().getCapacity());
- Assert.assertEquals(17800,
+ Assertions.assertEquals(17800,
datanodeMetadata.getDatanodeStorageReport().getRemaining());
- Assert.assertEquals(80000,
+ Assertions.assertEquals(80000,
datanodeMetadata.getDatanodeStorageReport().getUsed());
- Assert.assertEquals(0, datanodeMetadata.getPipelines().size());
- Assert.assertEquals(0, datanodeMetadata.getLeaderCount());
+ Assertions.assertEquals(0, datanodeMetadata.getPipelines().size());
+ Assertions.assertEquals(0, datanodeMetadata.getLeaderCount());
break;
default:
- Assert.fail(String.format("Datanode %s not registered",
+ Assertions.fail(String.format("Datanode %s not registered",
hostname));
}
- Assert.assertEquals(HDDSLayoutVersionManager.maxLayoutVersion(),
+ Assertions.assertEquals(HDDSLayoutVersionManager.maxLayoutVersion(),
datanodeMetadata.getLayoutVersion());
}
@@ -451,14 +451,14 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
Response response = nodeEndpoint.getDatanodes();
DatanodesResponse datanodesResponse =
(DatanodesResponse) response.getEntity();
- Assert.assertEquals(2, datanodesResponse.getTotalCount());
- Assert.assertEquals(2, datanodesResponse.getDatanodes().size());
+ Assertions.assertEquals(2, datanodesResponse.getTotalCount());
+ Assertions.assertEquals(2, datanodesResponse.getDatanodes().size());
datanodesResponse.getDatanodes().forEach(datanodeMetadata -> {
try {
testDatanodeResponse(datanodeMetadata);
} catch (IOException e) {
- Assert.fail(e.getMessage());
+ Assertions.fail(e.getMessage());
}
});
@@ -520,20 +520,20 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
Response response = pipelineEndpoint.getPipelines();
PipelinesResponse pipelinesResponse =
(PipelinesResponse) response.getEntity();
- Assert.assertEquals(1, pipelinesResponse.getTotalCount());
- Assert.assertEquals(1, pipelinesResponse.getPipelines().size());
+ Assertions.assertEquals(1, pipelinesResponse.getTotalCount());
+ Assertions.assertEquals(1, pipelinesResponse.getPipelines().size());
PipelineMetadata pipelineMetadata =
pipelinesResponse.getPipelines().iterator().next();
- Assert.assertEquals(1, pipelineMetadata.getDatanodes().size());
- Assert.assertEquals(pipeline.getType().toString(),
+ Assertions.assertEquals(1, pipelineMetadata.getDatanodes().size());
+ Assertions.assertEquals(pipeline.getType().toString(),
pipelineMetadata.getReplicationType());
- Assert.assertEquals(pipeline.getReplicationConfig().getReplication(),
+ Assertions.assertEquals(pipeline.getReplicationConfig().getReplication(),
pipelineMetadata.getReplicationFactor());
- Assert.assertEquals(datanodeDetails.getHostName(),
+ Assertions.assertEquals(datanodeDetails.getHostName(),
pipelineMetadata.getLeaderNode());
- Assert.assertEquals(pipeline.getId().getId(),
+ Assertions.assertEquals(pipeline.getId().getId(),
pipelineMetadata.getPipelineId());
- Assert.assertEquals(5, pipelineMetadata.getLeaderElections());
+ Assertions.assertEquals(5, pipelineMetadata.getLeaderElections());
waitAndCheckConditionAfterHeartbeat(() -> {
Response response1 = pipelineEndpoint.getPipelines();
@@ -583,12 +583,12 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
ClusterStateResponse clusterStateResponse =
(ClusterStateResponse) response.getEntity();
- Assert.assertEquals(1, clusterStateResponse.getPipelines());
- Assert.assertEquals(0, clusterStateResponse.getVolumes());
- Assert.assertEquals(0, clusterStateResponse.getBuckets());
- Assert.assertEquals(0, clusterStateResponse.getKeys());
- Assert.assertEquals(2, clusterStateResponse.getTotalDatanodes());
- Assert.assertEquals(2, clusterStateResponse.getHealthyDatanodes());
+ Assertions.assertEquals(1, clusterStateResponse.getPipelines());
+ Assertions.assertEquals(0, clusterStateResponse.getVolumes());
+ Assertions.assertEquals(0, clusterStateResponse.getBuckets());
+ Assertions.assertEquals(0, clusterStateResponse.getKeys());
+ Assertions.assertEquals(2, clusterStateResponse.getTotalDatanodes());
+ Assertions.assertEquals(2, clusterStateResponse.getHealthyDatanodes());
waitAndCheckConditionAfterHeartbeat(() -> {
Response response1 = clusterStateEndpoint.getClusterState();
@@ -603,9 +603,9 @@ public class TestEndpoints extends AbstractReconSqlDBTest {
assertTrue(result.getRight());
response = clusterStateEndpoint.getClusterState();
clusterStateResponse = (ClusterStateResponse) response.getEntity();
- Assert.assertEquals(2, clusterStateResponse.getVolumes());
- Assert.assertEquals(2, clusterStateResponse.getBuckets());
- Assert.assertEquals(3, clusterStateResponse.getKeys());
+ Assertions.assertEquals(2, clusterStateResponse.getVolumes());
+ Assertions.assertEquals(2, clusterStateResponse.getBuckets());
+ Assertions.assertEquals(3, clusterStateResponse.getKeys());
}
@Test
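Besides the assertion rename, TestEndpoints swaps the lifecycle annotation @Before for @BeforeEach. A minimal sketch of the JUnit 5 lifecycle mapping (hypothetical test, not part of this patch; @Before/@After become @BeforeEach/@AfterEach):

    import static org.junit.jupiter.api.Assertions.assertEquals;

    import org.junit.jupiter.api.AfterEach;
    import org.junit.jupiter.api.BeforeEach;
    import org.junit.jupiter.api.Test;

    class LifecycleSketchTest {
      private StringBuilder buffer;

      @BeforeEach          // JUnit 4: @Before
      void setUp() {
        buffer = new StringBuilder("recon");
      }

      @Test
      void appendsSuffix() {
        assertEquals("recon-test", buffer.append("-test").toString());
      }

      @AfterEach           // JUnit 4: @After
      void tearDown() {
        buffer = null;
      }
    }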
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java
index d5da4a35a1..3f2701a575 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestTaskStatusService.java
@@ -24,9 +24,9 @@ import com.google.inject.Injector;
import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest;
import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import javax.ws.rs.core.Response;
@@ -39,7 +39,7 @@ import java.util.List;
public class TestTaskStatusService extends AbstractReconSqlDBTest {
private TaskStatusService taskStatusService;
- @Before
+ @BeforeEach
public void setUp() {
Injector parentInjector = getInjector();
parentInjector.createChildInjector(new AbstractModule() {
@@ -67,10 +67,11 @@ public class TestTaskStatusService extends AbstractReconSqlDBTest {
List<ReconTaskStatus> responseList = (List<ReconTaskStatus>)
response.getEntity();
- Assert.assertEquals(resultList.size(), responseList.size());
+ Assertions.assertEquals(resultList.size(), responseList.size());
for (ReconTaskStatus r : responseList) {
- Assert.assertEquals(reconTaskStatusRecord.getTaskName(), r.getTaskName());
- Assert.assertEquals(reconTaskStatusRecord.getLastUpdatedTimestamp(),
+ Assertions.assertEquals(reconTaskStatusRecord.getTaskName(),
+ r.getTaskName());
+ Assertions.assertEquals(reconTaskStatusRecord.getLastUpdatedTimestamp(),
r.getLastUpdatedTimestamp());
}
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java
index 4f85866f15..21ea5e00f0 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/filters/TestAdminFilter.java
@@ -28,8 +28,8 @@ import org.apache.hadoop.ozone.recon.api.NodeEndpoint;
import org.apache.hadoop.ozone.recon.api.PipelineEndpoint;
import org.apache.hadoop.ozone.recon.api.TaskStatusService;
import org.apache.hadoop.ozone.recon.api.UtilizationEndpoint;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import org.reflections.Reflections;
import org.reflections.scanners.SubTypesScanner;
@@ -62,7 +62,7 @@ public class TestAdminFilter {
Set<Class<?>> allEndpoints =
reflections.getTypesAnnotatedWith(Path.class);
- Assert.assertFalse(allEndpoints.isEmpty());
+ Assertions.assertFalse(allEndpoints.isEmpty());
// If an endpoint is added, it must be explicitly added to this set or be
// marked with @AdminOnly for this test to pass.
@@ -74,23 +74,21 @@ public class TestAdminFilter {
nonAdminEndpoints.add(PipelineEndpoint.class);
nonAdminEndpoints.add(TaskStatusService.class);
- Assert.assertTrue(allEndpoints.containsAll(nonAdminEndpoints));
+ Assertions.assertTrue(allEndpoints.containsAll(nonAdminEndpoints));
Set<Class<?>> adminEndpoints = Sets.difference(allEndpoints,
nonAdminEndpoints);
for (Class<?> endpoint: nonAdminEndpoints) {
- Assert.assertFalse(String.format("Endpoint class %s has been " +
- "declared as non admin in this test, but is marked as " +
- "@AdminOnly.", endpoint),
- endpoint.isAnnotationPresent(AdminOnly.class));
+ Assertions.assertFalse(endpoint.isAnnotationPresent(AdminOnly.class),
+ String.format("Endpoint class %s has been declared as non admin " +
+ "in this test, but is marked as @AdminOnly.", endpoint));
}
for (Class<?> endpoint: adminEndpoints) {
- Assert.assertTrue(String.format("Endpoint class %s must be marked as " +
- "@AdminOnly or explicitly declared as non admin in this test.",
- endpoint),
- endpoint.isAnnotationPresent(AdminOnly.class));
+ Assertions.assertTrue(endpoint.isAnnotationPresent(AdminOnly.class),
+ String.format("Endpoint class %s must be marked as @AdminOnly " +
+ "or explicitly declared as non admin in this test.", endpoint));
}
}
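The TestAdminFilter hunk above also shows the one signature change that needs manual attention when migrating assertions: in JUnit 4 the optional failure message is the first argument, while in JUnit 5 it moves to the last position. A small sketch of the difference (hypothetical example, not from this patch):

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import org.junit.jupiter.api.Test;

    class MessageOrderSketchTest {
      @Test
      void failureMessageComesLast() {
        boolean annotated = true;
        // JUnit 4: Assert.assertTrue("endpoint must be @AdminOnly", annotated);
        // JUnit 5: the condition comes first, the message last.
        assertTrue(annotated, "endpoint must be @AdminOnly");
      }
    }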
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
index 7d76f1ca8d..2f404c72ff 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthStatus.java
@@ -26,15 +26,15 @@ import org.apache.hadoop.hdds.scm.container.ContainerID;
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacementStatusDefault;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import java.util.HashSet;
import java.util.Set;
-import static junit.framework.TestCase.assertEquals;
-import static junit.framework.TestCase.assertTrue;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -47,7 +47,7 @@ public class TestContainerHealthStatus {
private PlacementPolicy placementPolicy;
private ContainerInfo container;
- @Before
+ @BeforeEach
public void setup() {
placementPolicy = mock(PlacementPolicy.class);
container = mock(ContainerInfo.class);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
index 278283f829..f3ddf47e0c 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTask.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.ozone.recon.fsck;
-import static junit.framework.TestCase.assertEquals;
import static org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates.ALL_REPLICAS_UNHEALTHY;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -57,8 +57,8 @@ import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
import org.hadoop.ozone.recon.schema.tables.daos.UnhealthyContainersDao;
import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
import org.hadoop.ozone.recon.schema.tables.pojos.UnhealthyContainers;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
/**
* Class to test a single run of the Container Health Task.
@@ -128,7 +128,7 @@ public class TestContainerHealthTask extends AbstractReconSqlDBTest {
State.CLOSED, State.CLOSED, State.CLOSED));
List<UnhealthyContainers> all = unHealthyContainersTableHandle.findAll();
- Assert.assertTrue(all.isEmpty());
+ Assertions.assertTrue(all.isEmpty());
long currentTime = System.currentTimeMillis();
ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class);
@@ -177,7 +177,7 @@ public class TestContainerHealthTask extends AbstractReconSqlDBTest {
ReconTaskStatus taskStatus =
reconTaskStatusDao.findById(containerHealthTask.getTaskName());
- Assert.assertTrue(taskStatus.getLastUpdatedTimestamp() >
+ Assertions.assertTrue(taskStatus.getLastUpdatedTimestamp() >
currentTime);
// Now run the job again, to check that relevant records are updated or
@@ -272,7 +272,7 @@ public class TestContainerHealthTask extends AbstractReconSqlDBTest {
.thenReturn(new ContainerWithPipeline(mockDeletedContainer, null));
List<UnhealthyContainers> all = unHealthyContainersTableHandle.findAll();
- Assert.assertTrue(all.isEmpty());
+ Assertions.assertTrue(all.isEmpty());
long currentTime = System.currentTimeMillis();
ReconTaskStatusDao reconTaskStatusDao = getDao(ReconTaskStatusDao.class);
@@ -293,7 +293,7 @@ public class TestContainerHealthTask extends AbstractReconSqlDBTest {
ReconTaskStatus taskStatus =
reconTaskStatusDao.findById(containerHealthTask.getTaskName());
- Assert.assertTrue(taskStatus.getLastUpdatedTimestamp() >
+ Assertions.assertTrue(taskStatus.getLastUpdatedTimestamp() >
currentTime);
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
index 855a64d280..c88c2440f9 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/fsck/TestContainerHealthTaskRecordGenerator.java
@@ -28,20 +28,20 @@ import org.apache.hadoop.hdds.scm.container.placement.algorithms.ContainerPlacem
import org.hadoop.ozone.recon.schema.ContainerSchemaDefinition.UnHealthyContainerStates;
import org.hadoop.ozone.recon.schema.tables.pojos.UnhealthyContainers;
import org.hadoop.ozone.recon.schema.tables.records.UnhealthyContainersRecord;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
-import static junit.framework.TestCase.assertEquals;
-import static junit.framework.TestCase.assertNotNull;
-import static junit.framework.TestCase.assertTrue;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto;
-import static org.junit.Assert.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -54,7 +54,7 @@ public class TestContainerHealthTaskRecordGenerator {
private PlacementPolicy placementPolicy;
private ContainerInfo container;
- @Before
+ @BeforeEach
public void setup() {
placementPolicy = mock(PlacementPolicy.class);
container = mock(ContainerInfo.class);
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java
index 664a7321d9..c8447fffda 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/AbstractReconSqlDBTest.java
@@ -39,6 +39,7 @@ import org.jooq.impl.DefaultConfiguration;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Rule;
+import org.junit.jupiter.api.BeforeEach;
import org.junit.rules.TemporaryFolder;
import com.google.inject.AbstractModule;
@@ -79,6 +80,7 @@ public class AbstractReconSqlDBTest {
}
}
+ @BeforeEach
@Before
public void createReconSchemaForTest() throws IOException {
injector = Guice.createInjector(getReconSqlDBModules());
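Note that AbstractReconSqlDBTest keeps the JUnit 4 @Before annotation and only adds @BeforeEach next to it, so both migrated (JUnit 5) and not-yet-migrated (JUnit 4) subclasses still get the Recon schema created before each test. A minimal sketch of that transitional pattern (hypothetical base class, assuming both junit and junit-jupiter are on the test classpath):

    import org.junit.Before;                    // JUnit 4 lifecycle
    import org.junit.jupiter.api.BeforeEach;    // JUnit 5 lifecycle

    // Base class annotated for both engines while subclasses migrate one by one.
    public abstract class DualEngineBaseTest {
      protected boolean schemaReady;

      @BeforeEach
      @Before
      public void createSchemaForTest() {
        // Each engine honours its own annotation and ignores the other one.
        schemaReady = true;
      }
    }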
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java
index befd1edb0e..5570484c0f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestReconInternalSchemaDefinition.java
@@ -19,6 +19,8 @@
package org.apache.hadoop.ozone.recon.persistence;
import static org.hadoop.ozone.recon.schema.ReconTaskSchemaDefinition.RECON_TASK_STATUS_TABLE_NAME;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@@ -31,8 +33,7 @@ import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* Class used to test ReconInternalSchemaDefinition.
@@ -64,8 +65,8 @@ public class TestReconInternalSchemaDefinition extends AbstractReconSqlDBTest {
resultSet.getInt("DATA_TYPE")));
}
- Assert.assertEquals(3, actualPairs.size());
- Assert.assertEquals(expectedPairs, actualPairs);
+ assertEquals(3, actualPairs.size());
+ assertEquals(expectedPairs, actualPairs);
}
@Test
@@ -77,7 +78,7 @@ public class TestReconInternalSchemaDefinition extends AbstractReconSqlDBTest {
RECON_TASK_STATUS_TABLE_NAME, null);
while (resultSet.next()) {
- Assert.assertEquals(RECON_TASK_STATUS_TABLE_NAME,
+ assertEquals(RECON_TASK_STATUS_TABLE_NAME,
resultSet.getString("TABLE_NAME"));
}
@@ -101,9 +102,9 @@ public class TestReconInternalSchemaDefinition extends AbstractReconSqlDBTest {
// Read
ReconTaskStatus dbRecord = dao.findById("HelloWorldTask");
- Assert.assertEquals("HelloWorldTask", dbRecord.getTaskName());
- Assert.assertEquals(Long.valueOf(now), dbRecord.getLastUpdatedTimestamp());
- Assert.assertEquals(Long.valueOf(100), dbRecord.getLastUpdatedSeqNumber());
+ assertEquals("HelloWorldTask", dbRecord.getTaskName());
+ assertEquals(Long.valueOf(now), dbRecord.getLastUpdatedTimestamp());
+ assertEquals(Long.valueOf(100), dbRecord.getLastUpdatedSeqNumber());
// Update
dbRecord.setLastUpdatedSeqNumber(150L);
@@ -111,7 +112,7 @@ public class TestReconInternalSchemaDefinition extends AbstractReconSqlDBTest {
// Read updated
dbRecord = dao.findById("HelloWorldTask");
- Assert.assertEquals(Long.valueOf(150), dbRecord.getLastUpdatedSeqNumber());
+ assertEquals(Long.valueOf(150), dbRecord.getLastUpdatedSeqNumber());
// Delete
dao.deleteById("GoodbyeWorldTask");
@@ -119,7 +120,7 @@ public class TestReconInternalSchemaDefinition extends AbstractReconSqlDBTest {
// Verify
dbRecord = dao.findById("GoodbyeWorldTask");
- Assert.assertNull(dbRecord);
+ assertNull(dbRecord);
}
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java
index 19f8d7082e..498faa3d43 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestSqlSchemaSetup.java
@@ -19,14 +19,14 @@
package org.apache.hadoop.ozone.recon.persistence;
import static org.apache.hadoop.ozone.recon.ReconControllerModule.ReconDaoBindingModule.RECON_DAO_LIST;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
import java.sql.SQLException;
import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* Class to test basic SQL schema setup.
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java
index af08383dab..dcc971a79f 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestStatsSchemaDefinition.java
@@ -21,8 +21,8 @@ import org.apache.commons.lang3.tuple.ImmutablePair;
import org.apache.commons.lang3.tuple.Pair;
import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
import org.hadoop.ozone.recon.schema.tables.pojos.GlobalStats;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.Test;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@@ -61,8 +61,8 @@ public class TestStatsSchemaDefinition extends AbstractReconSqlDBTest {
resultSet.getInt("DATA_TYPE")));
}
- Assert.assertEquals(3, actualPairs.size());
- Assert.assertEquals(expectedPairs, actualPairs);
+ Assertions.assertEquals(3, actualPairs.size());
+ Assertions.assertEquals(expectedPairs, actualPairs);
}
@Test
@@ -74,7 +74,7 @@ public class TestStatsSchemaDefinition extends AbstractReconSqlDBTest {
GLOBAL_STATS_TABLE_NAME, null);
while (resultSet.next()) {
- Assert.assertEquals(GLOBAL_STATS_TABLE_NAME,
+ Assertions.assertEquals(GLOBAL_STATS_TABLE_NAME,
resultSet.getString("TABLE_NAME"));
}
@@ -97,14 +97,15 @@ public class TestStatsSchemaDefinition extends AbstractReconSqlDBTest {
// Read
GlobalStats dbRecord = dao.findById("key1");
- Assert.assertEquals("key1", dbRecord.getKey());
- Assert.assertEquals(Long.valueOf(500), dbRecord.getValue());
- Assert.assertEquals(new Timestamp(now), dbRecord.getLastUpdatedTimestamp());
+ Assertions.assertEquals("key1", dbRecord.getKey());
+ Assertions.assertEquals(Long.valueOf(500), dbRecord.getValue());
+ Assertions.assertEquals(new Timestamp(now),
+ dbRecord.getLastUpdatedTimestamp());
dbRecord = dao.findById("key2");
- Assert.assertEquals("key2", dbRecord.getKey());
- Assert.assertEquals(Long.valueOf(10), dbRecord.getValue());
- Assert.assertEquals(new Timestamp(now + 1000L),
+ Assertions.assertEquals("key2", dbRecord.getKey());
+ Assertions.assertEquals(Long.valueOf(10), dbRecord.getValue());
+ Assertions.assertEquals(new Timestamp(now + 1000L),
dbRecord.getLastUpdatedTimestamp());
// Update
@@ -115,9 +116,9 @@ public class TestStatsSchemaDefinition extends AbstractReconSqlDBTest {
// Read updated
dbRecord = dao.findById("key2");
- Assert.assertEquals(new Timestamp(now + 2000L),
+ Assertions.assertEquals(new Timestamp(now + 2000L),
dbRecord.getLastUpdatedTimestamp());
- Assert.assertEquals(Long.valueOf(100L), dbRecord.getValue());
+ Assertions.assertEquals(Long.valueOf(100L), dbRecord.getValue());
// Delete
dao.deleteById("key1");
@@ -125,6 +126,6 @@ public class TestStatsSchemaDefinition extends AbstractReconSqlDBTest {
// Verify
dbRecord = dao.findById("key1");
- Assert.assertNull(dbRecord);
+ Assertions.assertNull(dbRecord);
}
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
index 59689237c6..3c9c199584 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/persistence/TestUtilizationSchemaDefinition.java
@@ -21,7 +21,8 @@ import static org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition.CLUSTER_
import static org.hadoop.ozone.recon.schema.UtilizationSchemaDefinition.FILE_COUNT_BY_SIZE_TABLE_NAME;
import static org.hadoop.ozone.recon.schema.tables.ClusterGrowthDailyTable.CLUSTER_GROWTH_DAILY;
import static org.hadoop.ozone.recon.schema.tables.FileCountBySizeTable.FILE_COUNT_BY_SIZE;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
import java.sql.Connection;
import java.sql.DatabaseMetaData;
@@ -43,8 +44,7 @@ import org.hadoop.ozone.recon.schema.tables.records.FileCountBySizeRecord;
import org.jooq.Record3;
import org.jooq.Table;
import org.jooq.UniqueKey;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* Test persistence module provides connection and transaction awareness.
@@ -77,8 +77,8 @@ public class TestUtilizationSchemaDefinition extends AbstractReconSqlDBTest {
resultSet.getInt("DATA_TYPE")));
}
- Assert.assertEquals(8, actualPairs.size());
- Assert.assertEquals(expectedPairs, actualPairs);
+ assertEquals(8, actualPairs.size());
+ assertEquals(expectedPairs, actualPairs);
ResultSet resultSetFileCount = metaData.getColumns(null, null,
FILE_COUNT_BY_SIZE_TABLE_NAME, null);
@@ -99,10 +99,10 @@ public class TestUtilizationSchemaDefinition extends AbstractReconSqlDBTest {
"COLUMN_NAME"), resultSetFileCount.getInt(
"DATA_TYPE")));
}
- assertEquals("Unexpected number of columns",
- 4, actualPairsFileCount.size());
- assertEquals("Columns Do not Match ",
- expectedPairsFileCount, actualPairsFileCount);
+ assertEquals(4, actualPairsFileCount.size(),
+ "Unexpected number of columns");
+ assertEquals(expectedPairsFileCount, actualPairsFileCount,
+ "Columns Do not Match ");
}
@Test
@@ -115,7 +115,7 @@ public class TestUtilizationSchemaDefinition extends AbstractReconSqlDBTest {
CLUSTER_GROWTH_DAILY_TABLE_NAME, null);
while (resultSet.next()) {
- Assert.assertEquals(CLUSTER_GROWTH_DAILY_TABLE_NAME,
+ assertEquals(CLUSTER_GROWTH_DAILY_TABLE_NAME,
resultSet.getString("TABLE_NAME"));
}
@@ -140,12 +140,12 @@ public class TestUtilizationSchemaDefinition extends AbstractReconSqlDBTest {
CLUSTER_GROWTH_DAILY.DATANODE_ID)
.value1(new Timestamp(now)).value2(10));
- Assert.assertEquals("host1", dbRecord.getDatanodeHost());
- Assert.assertEquals("rack1", dbRecord.getRackId());
- Assert.assertEquals(Long.valueOf(1024), dbRecord.getAvailableSize());
- Assert.assertEquals(Long.valueOf(512), dbRecord.getUsedSize());
- Assert.assertEquals(Integer.valueOf(10), dbRecord.getContainerCount());
- Assert.assertEquals(Integer.valueOf(25), dbRecord.getBlockCount());
+ assertEquals("host1", dbRecord.getDatanodeHost());
+ assertEquals("rack1", dbRecord.getRackId());
+ assertEquals(Long.valueOf(1024), dbRecord.getAvailableSize());
+ assertEquals(Long.valueOf(512), dbRecord.getUsedSize());
+ assertEquals(Integer.valueOf(10), dbRecord.getContainerCount());
+ assertEquals(Integer.valueOf(25), dbRecord.getBlockCount());
// Update
dbRecord.setUsedSize(700L);
@@ -158,8 +158,8 @@ public class TestUtilizationSchemaDefinition extends AbstractReconSqlDBTest {
CLUSTER_GROWTH_DAILY.DATANODE_ID)
.value1(new Timestamp(now)).value2(10));
- Assert.assertEquals(Long.valueOf(700), dbRecord.getUsedSize());
- Assert.assertEquals(Integer.valueOf(30), dbRecord.getBlockCount());
+ assertEquals(Long.valueOf(700), dbRecord.getUsedSize());
+ assertEquals(Integer.valueOf(30), dbRecord.getBlockCount());
// Delete
dao.deleteById(getDslContext().newRecord(CLUSTER_GROWTH_DAILY.TIMESTAMP,
@@ -172,7 +172,7 @@ public class TestUtilizationSchemaDefinition extends AbstractReconSqlDBTest {
CLUSTER_GROWTH_DAILY.DATANODE_ID)
.value1(new Timestamp(now)).value2(10));
- Assert.assertNull(dbRecord);
+ assertNull(dbRecord);
}
@Test
@@ -184,7 +184,7 @@ public class TestUtilizationSchemaDefinition extends AbstractReconSqlDBTest {
FILE_COUNT_BY_SIZE_TABLE_NAME, null);
while (resultSet.next()) {
- Assert.assertEquals(FILE_COUNT_BY_SIZE_TABLE_NAME,
+ assertEquals(FILE_COUNT_BY_SIZE_TABLE_NAME,
resultSet.getString("TABLE_NAME"));
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
index 9fac275b33..1708dea00b 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/AbstractReconContainerManagerTest.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.recon.scm;
+import java.io.File;
import java.io.IOException;
import java.time.ZoneId;
import java.util.LinkedList;
@@ -60,10 +61,9 @@ import static org.apache.hadoop.hdds.scm.metadata.SCMDBDefinition.CONTAINERS;
import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_METADATA_DIRS;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
-import org.junit.After;
-import org.junit.Before;
-import org.junit.Rule;
-import org.junit.rules.TemporaryFolder;
+import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.io.TempDir;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -73,9 +73,6 @@ import static org.mockito.Mockito.when;
*/
public class AbstractReconContainerManagerTest {
- @Rule
- public TemporaryFolder temporaryFolder = new TemporaryFolder();
-
private OzoneConfiguration conf;
private SCMStorageConfig scmStorageConfig;
private ReconPipelineManager pipelineManager;
@@ -86,11 +83,10 @@ public class AbstractReconContainerManagerTest {
private SCMContext scmContext;
private SequenceIdGenerator sequenceIdGen;
- @Before
- public void setUp() throws Exception {
+ @BeforeEach
+ public void setUp(@TempDir File tempDir) throws Exception {
conf = new OzoneConfiguration();
- conf.set(OZONE_METADATA_DIRS,
- temporaryFolder.newFolder().getAbsolutePath());
+ conf.set(OZONE_METADATA_DIRS, tempDir.getAbsolutePath());
conf.set(OZONE_SCM_NAMES, "localhost");
store = DBStoreBuilder.createDBStore(conf, new ReconSCMDBDefinition());
scmhaManager = SCMHAManagerStub.getInstance(
@@ -131,7 +127,7 @@ public class AbstractReconContainerManagerTest {
pendingOps);
}
- @After
+ @AfterEach
public void tearDown() throws Exception {
containerManager.close();
pipelineManager.close();
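The @Rule TemporaryFolder used here has no JUnit 5 equivalent, so the setup switches to @TempDir injection, which can be placed on a field or on a @BeforeEach/@Test parameter as done above. A rough standalone sketch (hypothetical test; file names are invented for illustration):

    import static org.junit.jupiter.api.Assertions.assertTrue;

    import java.io.File;
    import java.io.IOException;
    import java.nio.charset.StandardCharsets;
    import java.nio.file.Files;
    import org.junit.jupiter.api.Test;
    import org.junit.jupiter.api.io.TempDir;

    class TempDirSketchTest {
      // JUnit 4: @Rule public TemporaryFolder temporaryFolder = new TemporaryFolder();
      @Test
      void writesIntoInjectedDirectory(@TempDir File tempDir) throws IOException {
        File metadataDir = new File(tempDir, "metadata");
        assertTrue(metadataDir.mkdir());
        File marker = new File(metadataDir, "marker");
        Files.write(marker.toPath(), "ok".getBytes(StandardCharsets.UTF_8));
        assertTrue(marker.exists());
        // JUnit deletes the injected directory after the test finishes.
      }
    }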
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
index 7a03929cf8..a373ff9aec 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconContainerManager.java
@@ -23,9 +23,9 @@ import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CL
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.CLOSING;
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.OPEN;
import static org.apache.hadoop.ozone.recon.OMMetadataManagerTestUtils.getRandomPipeline;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.IOException;
import java.util.LinkedList;
@@ -47,8 +47,7 @@ import org.apache.hadoop.hdds.scm.container.ContainerReplica;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerWithPipeline;
import org.apache.hadoop.hdds.scm.ha.SCMHAManager;
import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
-import org.junit.Assert;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* Test Recon Container Manager.
@@ -217,7 +216,7 @@ public class TestReconContainerManager
final Map<Long, Map<UUID, ContainerReplicaHistory>> repHistMap =
containerManager.getReplicaHistoryMap();
// Should be empty at the beginning
- Assert.assertEquals(0, repHistMap.size());
+ assertEquals(0, repHistMap.size());
// Put a replica info and call updateContainerReplica
Pipeline pipeline = getRandomPipeline();
@@ -230,11 +229,11 @@ public class TestReconContainerManager
containerManager.updateContainerReplica(containerID1, containerReplica1);
// Should have 1 container entry in the replica history map
- Assert.assertEquals(1, repHistMap.size());
+ assertEquals(1, repHistMap.size());
// Should only have 1 entry for this replica (on DN01)
- Assert.assertEquals(1, repHistMap.get(cIDlong1).size());
+ assertEquals(1, repHistMap.get(cIDlong1).size());
ContainerReplicaHistory repHist1 = repHistMap.get(cIDlong1).get(uuid1);
- Assert.assertEquals(uuid1, repHist1.getUuid());
+ assertEquals(uuid1, repHist1.getUuid());
// Because this is a new entry, first seen time equals last seen time
assertEquals(repHist1.getLastSeenTime(), repHist1.getFirstSeenTime());
assertEquals(containerReplica1.getSequenceId().longValue(),
@@ -246,9 +245,9 @@ public class TestReconContainerManager
.setDatanodeDetails(datanodeDetails1).setSequenceId(1051L).build();
containerManager.updateContainerReplica(containerID1, containerReplica1);
// Should still have 1 entry in the replica history map
- Assert.assertEquals(1, repHistMap.size());
+ assertEquals(1, repHistMap.size());
// Now last seen time should be larger than first seen time
- Assert.assertTrue(repHist1.getLastSeenTime() > repHist1.getFirstSeenTime());
+ assertTrue(repHist1.getLastSeenTime() > repHist1.getFirstSeenTime());
assertEquals(1051L, repHist1.getBcsId());
// Init DN02
@@ -263,11 +262,11 @@ public class TestReconContainerManager
containerManager.updateContainerReplica(containerID1, containerReplica2);
// Should still have 1 container entry in the replica history map
- Assert.assertEquals(1, repHistMap.size());
+ assertEquals(1, repHistMap.size());
// Should have 2 entries for this replica (on DN01 and DN02)
- Assert.assertEquals(2, repHistMap.get(cIDlong1).size());
+ assertEquals(2, repHistMap.get(cIDlong1).size());
ContainerReplicaHistory repHist2 = repHistMap.get(cIDlong1).get(uuid2);
- Assert.assertEquals(uuid2, repHist2.getUuid());
+ assertEquals(uuid2, repHist2.getUuid());
// Because this is a new entry, first seen time equals last seen time
assertEquals(repHist2.getLastSeenTime(), repHist2.getFirstSeenTime());
assertEquals(1051L, repHist2.getBcsId());
@@ -275,11 +274,11 @@ public class TestReconContainerManager
// Remove replica from DN01
containerManager.removeContainerReplica(containerID1, containerReplica1);
// Should still have 1 container entry in the replica history map
- Assert.assertEquals(1, repHistMap.size());
+ assertEquals(1, repHistMap.size());
// Should have 1 entry for this replica
- Assert.assertEquals(1, repHistMap.get(cIDlong1).size());
+ assertEquals(1, repHistMap.get(cIDlong1).size());
// And the only entry should match DN02
- Assert.assertEquals(uuid2,
+ assertEquals(uuid2,
repHistMap.get(cIDlong1).keySet().iterator().next());
}
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
index 228244645f..c8abe3c794 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconIncrementalContainerReportHandler.java
@@ -21,8 +21,8 @@ package org.apache.hadoop.ozone.recon.scm;
import static org.apache.hadoop.hdds.protocol.MockDatanodeDetails.randomDatanodeDetails;
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.LifeCycleState.OPEN;
import static org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager.maxLayoutVersion;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -55,7 +55,7 @@ import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.hdds.server.events.EventQueue;
import org.apache.hadoop.hdds.upgrade.HDDSLayoutVersionManager;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
import org.mockito.Mockito;
/**
@@ -153,9 +153,9 @@ public class TestReconIncrementalContainerReportHandler
LifeCycleState expectedState = getContainerStateFromReplicaState(state);
LifeCycleState actualState =
containerManager.getContainer(containerID).getState();
- assertEquals(String.format("Expecting %s in " +
- "container state for replica state %s", expectedState,
- state), expectedState, actualState);
+ assertEquals(expectedState, actualState,
+ String.format("Expecting %s in container state for replica state %s",
+ expectedState, state));
}
}
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java
index 049559efec..20d5e92901 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/scm/TestReconPipelineReportHandler.java
@@ -35,7 +35,7 @@ import org.apache.hadoop.hdds.scm.pipeline.Pipeline;
import org.apache.hadoop.hdds.scm.pipeline.PipelineID;
import org.apache.hadoop.hdds.server.events.EventPublisher;
import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
-import org.junit.Test;
+import org.junit.jupiter.api.Test;
/**
* Class to test handling of known and new pipelines by Recon's pipeline
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java
index f9d3fc41fc..912cb571fc 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/spi/impl/TestStorageContainerServiceProviderImpl.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.ozone.recon.spi.impl;
-import static org.junit.Assert.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
@@ -36,9 +36,9 @@ import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol;
import org.apache.hadoop.ozone.recon.ReconUtils;
import org.apache.hadoop.ozone.recon.spi.StorageContainerServiceProvider;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.Assertions;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import com.google.inject.AbstractModule;
import com.google.inject.Guice;
@@ -52,7 +52,7 @@ public class TestStorageContainerServiceProviderImpl {
private Injector injector;
private HddsProtos.PipelineID pipelineID;
- @Before
+ @BeforeEach
public void setup() {
injector = Guice.createInjector(new AbstractModule() {
@Override
@@ -75,7 +75,7 @@ public class TestStorageContainerServiceProviderImpl {
toInstance(conf);
bind(ReconUtils.class).toInstance(reconUtils);
} catch (Exception e) {
- Assert.fail();
+ Assertions.fail();
}
}
});
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
index 5f4ecdab6e..dd16ecb635 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestFileSizeCountTask.java
@@ -31,8 +31,8 @@ import org.hadoop.ozone.recon.schema.tables.daos.FileCountBySizeDao;
import org.hadoop.ozone.recon.schema.tables.pojos.FileCountBySize;
import org.jooq.DSLContext;
import org.jooq.Record3;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import org.mockito.AdditionalAnswers;
import java.io.IOException;
@@ -44,9 +44,9 @@ import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateActi
import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT;
import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE;
import static org.hadoop.ozone.recon.schema.tables.FileCountBySizeTable.FILE_COUNT_BY_SIZE;
-import static org.junit.Assert.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.BDDMockito.given;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -60,7 +60,7 @@ public class TestFileSizeCountTask extends AbstractReconSqlDBTest {
private FileSizeCountTask fileSizeCountTask;
private DSLContext dslContext;
- @Before
+ @BeforeEach
public void setUp() {
fileCountBySizeDao = getDao(FileCountBySizeDao.class);
UtilizationSchemaDefinition utilizationSchemaDefinition =
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java
index a89a4aea35..d79c90662a 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestReconTaskControllerImpl.java
@@ -18,9 +18,10 @@
package org.apache.hadoop.ozone.recon.tasks;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
@@ -36,9 +37,8 @@ import org.apache.hadoop.ozone.recon.persistence.AbstractReconSqlDBTest;
import org.apache.hadoop.ozone.recon.recovery.ReconOMMetadataManager;
import org.hadoop.ozone.recon.schema.tables.daos.ReconTaskStatusDao;
import org.hadoop.ozone.recon.schema.tables.pojos.ReconTaskStatus;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
/**
* Class used to test ReconTaskControllerImpl.
@@ -48,7 +48,7 @@ public class TestReconTaskControllerImpl extends AbstractReconSqlDBTest {
private ReconTaskController reconTaskController;
private ReconTaskStatusDao reconTaskStatusDao;
- @Before
+ @BeforeEach
public void setUp() {
OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
reconTaskStatusDao = getDao(ReconTaskStatusDao.class);
@@ -63,9 +63,9 @@ public class TestReconTaskControllerImpl extends AbstractReconSqlDBTest {
DummyReconDBTask dummyReconDBTask =
new DummyReconDBTask(taskName, DummyReconDBTask.TaskType.ALWAYS_PASS);
reconTaskController.registerTask(dummyReconDBTask);
- assertTrue(reconTaskController.getRegisteredTasks().size() == 1);
- assertTrue(reconTaskController.getRegisteredTasks()
- .get(dummyReconDBTask.getTaskName()) == dummyReconDBTask);
+ assertEquals(1, reconTaskController.getRegisteredTasks().size());
+ assertSame(reconTaskController.getRegisteredTasks()
+ .get(dummyReconDBTask.getTaskName()), dummyReconDBTask);
}
@Test
@@ -92,10 +92,8 @@ public class TestReconTaskControllerImpl extends AbstractReconSqlDBTest {
long taskTimeStamp = reconTaskStatus.getLastUpdatedTimestamp();
long seqNumber = reconTaskStatus.getLastUpdatedSeqNumber();
- Assert.assertTrue(startTime <= taskTimeStamp
- && taskTimeStamp <= endTime);
- Assert.assertEquals(seqNumber,
- omUpdateEventBatchMock.getLastSequenceNumber());
+ assertTrue(startTime <= taskTimeStamp && taskTimeStamp <= endTime);
+ assertEquals(seqNumber, omUpdateEventBatchMock.getLastSequenceNumber());
}
@Test
@@ -120,11 +118,10 @@ public class TestReconTaskControllerImpl extends AbstractReconSqlDBTest {
reconTaskStatusDao = getDao(ReconTaskStatusDao.class);
ReconTaskStatus dbRecord = reconTaskStatusDao.findById(taskName);
- Assert.assertEquals(taskName, dbRecord.getTaskName());
- Assert.assertTrue(
- dbRecord.getLastUpdatedTimestamp() > currentTime);
+ assertEquals(taskName, dbRecord.getTaskName());
+ assertTrue(dbRecord.getLastUpdatedTimestamp() > currentTime);
- Assert.assertEquals(Long.valueOf(100L), dbRecord.getLastUpdatedSeqNumber());
+ assertEquals(Long.valueOf(100L), dbRecord.getLastUpdatedSeqNumber());
}
@Test
@@ -156,9 +153,9 @@ public class TestReconTaskControllerImpl extends AbstractReconSqlDBTest {
reconTaskStatusDao = getDao(ReconTaskStatusDao.class);
ReconTaskStatus dbRecord = reconTaskStatusDao.findById(taskName);
- Assert.assertEquals(taskName, dbRecord.getTaskName());
- Assert.assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedTimestamp());
- Assert.assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedSeqNumber());
+ assertEquals(taskName, dbRecord.getTaskName());
+ assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedTimestamp());
+ assertEquals(Long.valueOf(0L), dbRecord.getLastUpdatedSeqNumber());
}
@@ -189,9 +186,8 @@ public class TestReconTaskControllerImpl extends AbstractReconSqlDBTest {
long taskTimeStamp = reconTaskStatus.getLastUpdatedTimestamp();
long seqNumber = reconTaskStatus.getLastUpdatedSeqNumber();
- Assert.assertTrue(startTime <= taskTimeStamp
- && taskTimeStamp <= endTime);
- Assert.assertEquals(seqNumber,
+ assertTrue(startTime <= taskTimeStamp && taskTimeStamp <= endTime);
+ assertEquals(seqNumber,
omMetadataManagerMock.getLastSequenceNumberFromDB());
}
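While migrating, this file also replaces assertTrue(a == b) checks with assertEquals and assertSame, which report the expected and actual values on failure instead of a bare boolean. A short sketch of the idiom (hypothetical values, not from this patch):

    import static org.junit.jupiter.api.Assertions.assertEquals;
    import static org.junit.jupiter.api.Assertions.assertSame;

    import java.util.HashMap;
    import java.util.Map;
    import org.junit.jupiter.api.Test;

    class SpecificAssertionsSketchTest {
      @Test
      void preferSpecificAssertions() {
        String task = "DummyReconDBTask";
        Map<String, String> registered = new HashMap<>();
        registered.put(task, task);
        // Instead of assertTrue(registered.size() == 1):
        assertEquals(1, registered.size());
        // Instead of assertTrue(registered.get(task) == task):
        assertSame(task, registered.get(task));
      }
    }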
diff --git a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
index 81c406e5af..8151b2385b 100644
--- a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
+++ b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/tasks/TestTableCountTask.java
@@ -34,8 +34,8 @@ import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.OPEN_KEY_TABLE;
import static org.apache.hadoop.ozone.om.OmMetadataManagerImpl.VOLUME_TABLE;
import org.hadoop.ozone.recon.schema.tables.daos.GlobalStatsDao;
import org.jooq.DSLContext;
-import org.junit.Before;
-import org.junit.Test;
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.Test;
import java.io.IOException;
import java.util.ArrayList;
@@ -46,8 +46,8 @@ import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateActi
import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.PUT;
import static org.apache.hadoop.ozone.recon.tasks.OMDBUpdateEvent.OMDBUpdateAction.UPDATE;
import static org.hadoop.ozone.recon.schema.tables.GlobalStatsTable.GLOBAL_STATS;
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -71,7 +71,7 @@ public class TestTableCountTask extends AbstractReconSqlDBTest {
dslContext = getDslContext();
}
- @Before
+ @BeforeEach
public void setUp() throws IOException {
// The following setup runs only once
if (!isSetupDone) {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]