This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new cc23ace26e HDDS-9981. Improve assertTrue assertions in remaining hdds
modules (#6093)
cc23ace26e is described below
commit cc23ace26e4aac3009e439b10cbc75f065fd57e7
Author: Zhaohui Wang <[email protected]>
AuthorDate: Thu Jan 25 23:21:47 2024 +0800
HDDS-9981. Improve assertTrue assertions in remaining hdds modules (#6093)
---
.../hadoop/hdds/scm/storage/TestBufferPool.java | 6 ++---
.../TestECBlockReconstructedStripeInputStream.java | 3 ++-
.../common/impl/TestContainerPersistence.java | 4 +--
.../container/ozoneimpl/TestContainerReader.java | 2 +-
.../ozone/erasurecode/TestCodecRegistry.java | 29 ++++++++++-----------
.../erasurecode/rawcoder/TestRawCoderBase.java | 6 ++---
.../apache/hadoop/hdds/utils/db/TestRDBStore.java | 2 +-
.../db/managed/TestManagedSSTDumpIterator.java | 4 +--
.../rocksdiff/TestRocksDBCheckpointDiffer.java | 7 ++---
.../hdds/scm/node/TestDatanodeAdminMonitor.java | 5 ++--
.../hdds/scm/node/TestNodeDecommissionMetrics.java | 6 ++---
.../hdds/scm/pipeline/TestPipelineManagerImpl.java | 14 +++++-----
.../hdds/scm/safemode/TestSCMSafeModeManager.java | 2 +-
.../scm/cli/datanode/TestUsageInfoSubcommand.java | 30 +++++++++++-----------
14 files changed, 61 insertions(+), 59 deletions(-)
diff --git
a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java
b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java
index aabbbb3eed..b56c503df9 100644
---
a/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java
+++
b/hadoop-hdds/client/src/test/java/org/apache/hadoop/hdds/scm/storage/TestBufferPool.java
@@ -26,8 +26,8 @@ import java.util.Deque;
import java.util.LinkedList;
import java.util.concurrent.ThreadLocalRandom;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -79,8 +79,8 @@ class TestBufferPool {
assertEmpty(allocated, size);
fill(allocated); // make buffer contents unique, for equals check
- assertFalse(buffers.contains(allocated),
- () -> "buffer " + n + ": " + allocated + " already in: " + buffers);
+ assertThat(buffers).withFailMessage("buffer " + n + ": " + allocated + "
already in: " + buffers)
+ .doesNotContain(allocated);
buffers.addLast(allocated);
}
diff --git
a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
index c32cea0951..f7a4bb0643 100644
---
a/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
+++
b/hadoop-hdds/client/src/test/java/org/apache/hadoop/ozone/client/io/TestECBlockReconstructedStripeInputStream.java
@@ -52,6 +52,7 @@ import static java.util.Collections.emptySet;
import static java.util.Collections.singleton;
import static java.util.stream.Collectors.toSet;
import static
org.apache.hadoop.ozone.client.io.ECStreamTestUtil.generateParity;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -800,7 +801,7 @@ public class TestECBlockReconstructedStripeInputStream {
// created in the stream factory, indicating we did not read them.
List<TestBlockInputStream> streams = streamFactory.getBlockStreams();
for (TestBlockInputStream stream : streams) {
- assertTrue(stream.getEcReplicaIndex() > 2);
+ assertThat(stream.getEcReplicaIndex()).isGreaterThan(2);
}
}
}
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
index fcc48fef1b..12500201a8 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
@@ -544,7 +544,7 @@ public class TestContainerPersistence {
long actualContainerID = report.getContainerID();
assertTrue(containerIDs.remove(actualContainerID));
}
- assertTrue(containerIDs.isEmpty());
+ assertThat(containerIDs).isEmpty();
}
/**
@@ -585,7 +585,7 @@ public class TestContainerPersistence {
}
// Assert that we listed all the keys that we had put into
// container.
- assertTrue(testMap.isEmpty());
+ assertThat(testMap).isEmpty();
}
private ChunkInfo writeChunkHelper(BlockID blockID) throws IOException {
diff --git
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
index d48f0d3314..6ce1ffb08e 100644
---
a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
+++
b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestContainerReader.java
@@ -358,7 +358,7 @@ public class TestContainerReader {
hddsVolume1, containerSet1, conf, true);
containerReader.readVolume(hddsVolume1.getHddsRootDir());
assertEquals(0, containerSet1.containerCount());
- assertTrue(dnLogs.getOutput().contains("Container DB file is missing"));
+ assertThat(dnLogs.getOutput()).contains("Container DB file is missing");
}
@ContainerTestVersionInfo.ContainerTest
diff --git
a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java
b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java
index fcdbacbec1..f4e1794519 100644
---
a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java
+++
b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/TestCodecRegistry.java
@@ -31,9 +31,10 @@ import java.util.ArrayList;
import java.util.List;
import java.util.Set;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test CodecRegistry.
@@ -44,10 +45,8 @@ public class TestCodecRegistry {
public void testGetCodecs() {
Set<String> codecs = CodecRegistry.getInstance().getCodecNames();
assertEquals(2, codecs.size());
- assertTrue(
- codecs.contains(ECReplicationConfig.EcCodec.RS.name().toLowerCase()));
- assertTrue(
- codecs.contains(ECReplicationConfig.EcCodec.XOR.name().toLowerCase()));
+
assertThat(codecs).contains(ECReplicationConfig.EcCodec.RS.name().toLowerCase());
+
assertThat(codecs).contains(ECReplicationConfig.EcCodec.XOR.name().toLowerCase());
}
@Test
@@ -55,14 +54,14 @@ public class TestCodecRegistry {
List<RawErasureCoderFactory> coders = CodecRegistry.getInstance().
getCoders(ECReplicationConfig.EcCodec.RS.name().toLowerCase());
assertEquals(2, coders.size());
- assertTrue(coders.get(0) instanceof NativeRSRawErasureCoderFactory);
- assertTrue(coders.get(1) instanceof RSRawErasureCoderFactory);
+ assertInstanceOf(NativeRSRawErasureCoderFactory.class, coders.get(0));
+ assertInstanceOf(RSRawErasureCoderFactory.class, coders.get(1));
coders = CodecRegistry.getInstance().
getCoders(ECReplicationConfig.EcCodec.XOR.name().toLowerCase());
assertEquals(2, coders.size());
- assertTrue(coders.get(0) instanceof NativeXORRawErasureCoderFactory);
- assertTrue(coders.get(1) instanceof XORRawErasureCoderFactory);
+ assertInstanceOf(NativeXORRawErasureCoderFactory.class, coders.get(0));
+ assertInstanceOf(XORRawErasureCoderFactory.class, coders.get(1));
}
@Test
@@ -108,8 +107,8 @@ public class TestCodecRegistry {
List<RawErasureCoderFactory> rsCoders = CodecRegistry.getInstance().
getCoders(ECReplicationConfig.EcCodec.RS.name().toLowerCase());
assertEquals(2, rsCoders.size());
- assertTrue(rsCoders.get(0) instanceof NativeRSRawErasureCoderFactory);
- assertTrue(rsCoders.get(1) instanceof RSRawErasureCoderFactory);
+ assertInstanceOf(NativeRSRawErasureCoderFactory.class, rsCoders.get(0));
+ assertInstanceOf(RSRawErasureCoderFactory.class, rsCoders.get(1));
// check RS coder names
String[] rsCoderNames = CodecRegistry.getInstance().
@@ -139,21 +138,21 @@ public class TestCodecRegistry {
RawErasureCoderFactory coder = CodecRegistry.getInstance().
getCoderByName(ECReplicationConfig.EcCodec.RS.name().toLowerCase(),
RSRawErasureCoderFactory.CODER_NAME);
- assertTrue(coder instanceof RSRawErasureCoderFactory);
+ assertInstanceOf(RSRawErasureCoderFactory.class, coder);
coder = CodecRegistry.getInstance()
.getCoderByName(ECReplicationConfig.EcCodec.RS.name().toLowerCase(),
NativeRSRawErasureCoderFactory.CODER_NAME);
- assertTrue(coder instanceof NativeRSRawErasureCoderFactory);
+ assertInstanceOf(NativeRSRawErasureCoderFactory.class, coder);
coder = CodecRegistry.getInstance()
.getCoderByName(ECReplicationConfig.EcCodec.XOR.name().toLowerCase(),
XORRawErasureCoderFactory.CODER_NAME);
- assertTrue(coder instanceof XORRawErasureCoderFactory);
+ assertInstanceOf(XORRawErasureCoderFactory.class, coder);
coder = CodecRegistry.getInstance()
.getCoderByName(ECReplicationConfig.EcCodec.XOR.name().toLowerCase(),
NativeXORRawErasureCoderFactory.CODER_NAME);
- assertTrue(coder instanceof NativeXORRawErasureCoderFactory);
+ assertInstanceOf(NativeXORRawErasureCoderFactory.class, coder);
}
}
diff --git
a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java
b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java
index 1f8cff4338..2508e603db 100644
---
a/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java
+++
b/hadoop-hdds/erasurecode/src/test/java/org/apache/ozone/erasurecode/rawcoder/TestRawCoderBase.java
@@ -24,9 +24,9 @@ import org.junit.jupiter.api.Test;
import java.io.IOException;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
@@ -132,13 +132,13 @@ public abstract class TestRawCoderBase extends
TestCoderBase {
final ECChunk[] parity = prepareParityChunksForEncoding();
IOException ioException = assertThrows(IOException.class,
() -> encoder.encode(data, parity));
- assertTrue(ioException.getMessage().contains("closed"));
+ assertThat(ioException.getMessage()).contains("closed");
decoder.release();
final ECChunk[] in = prepareInputChunksForDecoding(data, parity);
final ECChunk[] out = prepareOutputChunksForDecoding();
ioException = assertThrows(IOException.class,
() -> decoder.decode(in, getErasedIndexesForDecoding(), out));
- assertTrue(ioException.getMessage().contains("closed"));
+ assertThat(ioException.getMessage()).contains("closed");
}
@Test
diff --git
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
index ee589ca8a3..56a16422d0 100644
---
a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
+++
b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/db/TestRDBStore.java
@@ -248,7 +248,7 @@ public class TestRDBStore {
int count = families.size();
// Assert that we have all the tables in the list and no more.
for (String name : families) {
- assertTrue(hashTable.containsKey(name));
+ assertThat(hashTable).containsKey(name);
count--;
}
assertEquals(0, count);
diff --git
a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
index d8fefeb9b7..d2796c19fc 100644
---
a/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
+++
b/hadoop-hdds/rocks-native/src/test/java/org/apache/hadoop/hdds/utils/db/managed/TestManagedSSTDumpIterator.java
@@ -54,9 +54,9 @@ import java.util.stream.IntStream;
import java.util.stream.Stream;
import static
org.apache.hadoop.hdds.utils.NativeConstants.ROCKS_TOOLS_NATIVE_LIBRARY_NAME;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assumptions.assumeTrue;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
@@ -234,7 +234,7 @@ class TestManagedSSTDumpIterator {
ManagedSSTDumpIterator.KeyValue r = iterator.next();
String key = new String(r.getKey(), StandardCharsets.UTF_8);
Pair<String, Integer> recordKey = Pair.of(key, r.getType());
- assertTrue(expectedKeys.containsKey(recordKey));
+ assertThat(expectedKeys).containsKey(recordKey);
assertEquals(Optional.ofNullable(expectedKeys
.get(recordKey)).orElse(""),
new String(r.getValue(), StandardCharsets.UTF_8));
diff --git
a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index f70b85daeb..bb7fd559b9 100644
---
a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++
b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -96,6 +96,7 @@ import static
org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COMPACTION_LOG_
import static
org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_DAG_LIVE_NODES;
import static
org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.DEBUG_READ_ALL_DB_KEYS;
import static
org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.SST_FILE_EXTENSION;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -1867,7 +1868,7 @@ public class TestRocksDBCheckpointDiffer {
createKeys(compactionLogTableCFHandle, "logName-", "logValue-", 100);
// Makes sure that some compaction happened.
- assertFalse(rocksDBCheckpointDiffer.getCompactionNodeMap().isEmpty());
+ assertThat(rocksDBCheckpointDiffer.getCompactionNodeMap()).isNotEmpty();
List<CompactionNode> compactionNodes = rocksDBCheckpointDiffer.
getCompactionNodeMap().values().stream()
@@ -1877,7 +1878,7 @@ public class TestRocksDBCheckpointDiffer {
// CompactionNodeMap should not contain any node other than 'keyTable',
// 'directoryTable' and 'fileTable' column families nodes.
- assertTrue(compactionNodes.isEmpty());
+ assertThat(compactionNodes).isEmpty();
// Assert that only 'keyTable', 'directoryTable' and 'fileTable'
// column families SST files are backed-up.
@@ -1889,7 +1890,7 @@ public class TestRocksDBCheckpointDiffer {
fileReader.open(path.toAbsolutePath().toString());
String columnFamily = StringUtils.bytes2String(
fileReader.getTableProperties().getColumnFamilyName());
- assertTrue(COLUMN_FAMILIES_TO_TRACK_IN_DAG.contains(columnFamily));
+ assertThat(COLUMN_FAMILIES_TO_TRACK_IN_DAG).contains(columnFamily);
} catch (RocksDBException rocksDBException) {
fail("Failed to read file: " + path.toAbsolutePath());
}
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
index f4002a7da1..06565e1b7e 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestDatanodeAdminMonitor.java
@@ -48,6 +48,7 @@ import java.util.HashSet;
import java.util.Set;
import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.mock;
@@ -691,8 +692,8 @@ public class TestDatanodeAdminMonitor {
assertEquals(1, monitor.getTrackedNodeCount());
long monitoredTime = monitor.getSingleTrackedNode(dn1.getIpAddress())
.getStartTime();
- assertTrue(monitoredTime >= beforeTime);
- assertTrue(monitoredTime <= afterTime);
+ assertThat(monitoredTime).isGreaterThanOrEqualTo(beforeTime);
+ assertThat(monitoredTime).isLessThanOrEqualTo(afterTime);
}
@Test
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java
index 2005d518ef..d9cd79b752 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestNodeDecommissionMetrics.java
@@ -35,7 +35,7 @@ import java.util.HashSet;
import java.util.Set;
import static org.mockito.Mockito.mock;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONED;
import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState.DECOMMISSIONING;
@@ -346,7 +346,7 @@ public class TestNodeDecommissionMetrics {
monitor.run();
long startTime = monitor.getSingleTrackedNode(dn1.getIpAddress())
.getStartTime();
- assertTrue(before <= startTime);
- assertTrue(after >= startTime);
+ assertThat(before).isLessThanOrEqualTo(startTime);
+ assertThat(after).isGreaterThanOrEqualTo(startTime);
}
}
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
index 270ae0ef49..455276f754 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/pipeline/TestPipelineManagerImpl.java
@@ -216,7 +216,7 @@ public class TestPipelineManagerImpl {
PipelineManagerImpl pipelineManager2 =
createPipelineManager(true, buffer2);
// Should be able to load previous pipelines.
- assertFalse(pipelineManager2.getPipelines().isEmpty());
+ assertThat(pipelineManager2.getPipelines()).isNotEmpty();
assertEquals(3, pipelineManager.getPipelines().size());
Pipeline pipeline3 = pipelineManager2.createPipeline(
RatisReplicationConfig.getInstance(ReplicationFactor.THREE));
@@ -261,10 +261,10 @@ public class TestPipelineManagerImpl {
assertEquals(Pipeline.PipelineState.DORMANT,
pipelineManager.getPipeline(pipelineID).getPipelineState());
buffer.flush();
assertEquals(Pipeline.PipelineState.DORMANT,
pipelineStore.get(pipeline.getId()).getPipelineState());
- assertFalse(pipelineManager
+ assertThat(pipelineManager
.getPipelines(RatisReplicationConfig
.getInstance(ReplicationFactor.THREE),
- Pipeline.PipelineState.OPEN).contains(pipeline));
+ Pipeline.PipelineState.OPEN)).doesNotContain(pipeline);
assertEquals(1, pipelineManager.getPipelineCount(
RatisReplicationConfig.getInstance(ReplicationFactor.THREE),
Pipeline.PipelineState.DORMANT));
@@ -573,16 +573,16 @@ public class TestPipelineManagerImpl {
pipelineManager.scrubPipelines();
// The allocatedPipeline should now be scrubbed as the interval has passed
- assertFalse(pipelineManager
+ assertThat(pipelineManager
.getPipelines(RatisReplicationConfig
.getInstance(ReplicationFactor.THREE),
- Pipeline.PipelineState.ALLOCATED).contains(allocatedPipeline));
+
Pipeline.PipelineState.ALLOCATED)).doesNotContain(allocatedPipeline);
// The closedPipeline should now be scrubbed as the interval has passed
- assertFalse(pipelineManager
+ assertThat(pipelineManager
.getPipelines(RatisReplicationConfig
.getInstance(ReplicationFactor.THREE),
- Pipeline.PipelineState.CLOSED).contains(closedPipeline));
+ Pipeline.PipelineState.CLOSED)).doesNotContain(closedPipeline);
pipelineManager.close();
}
diff --git
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
index 4f08462342..f9114b1d5a 100644
---
a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
+++
b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/safemode/TestSCMSafeModeManager.java
@@ -397,7 +397,7 @@ public class TestSCMSafeModeManager {
if (entry.getKey().equals(safeModeRule)) {
Pair<Boolean, String> value = entry.getValue();
assertEquals(false, value.getLeft());
- assertTrue(value.getRight().contains(stringToMatch));
+ assertThat(value.getRight()).contains(stringToMatch);
}
}
}
diff --git
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
index db777e4396..09f6621735 100644
---
a/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
+++
b/hadoop-hdds/tools/src/test/java/org/apache/hadoop/hdds/scm/cli/datanode/TestUsageInfoSubcommand.java
@@ -37,9 +37,9 @@ import java.util.ArrayList;
import java.util.List;
import static com.fasterxml.jackson.databind.node.JsonNodeType.ARRAY;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.Mockito.anyBoolean;
import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.mock;
@@ -111,20 +111,20 @@ public class TestUsageInfoSubcommand {
// then
String output = outContent.toString(CharEncoding.UTF_8);
- assertTrue(output.contains("UUID :"));
- assertTrue(output.contains("IP Address :"));
- assertTrue(output.contains("Hostname :"));
- assertTrue(output.contains("Capacity :"));
- assertTrue(output.contains("Total Used :"));
- assertTrue(output.contains("Total Used % :"));
- assertTrue(output.contains("Ozone Used :"));
- assertTrue(output.contains("Ozone Used % :"));
- assertTrue(output.contains("Remaining :"));
- assertTrue(output.contains("Remaining % :"));
- assertTrue(output.contains("Container(s) :"));
- assertTrue(output.contains("Container Pre-allocated :"));
- assertTrue(output.contains("Remaining Allocatable :"));
- assertTrue(output.contains("Free Space To Spare :"));
+ assertThat(output).contains("UUID :");
+ assertThat(output).contains("IP Address :");
+ assertThat(output).contains("Hostname :");
+ assertThat(output).contains("Capacity :");
+ assertThat(output).contains("Total Used :");
+ assertThat(output).contains("Total Used % :");
+ assertThat(output).contains("Ozone Used :");
+ assertThat(output).contains("Ozone Used % :");
+ assertThat(output).contains("Remaining :");
+ assertThat(output).contains("Remaining % :");
+ assertThat(output).contains("Container(s) :");
+ assertThat(output).contains("Container Pre-allocated :");
+ assertThat(output).contains("Remaining Allocatable :");
+ assertThat(output).contains("Free Space To Spare :");
}
private List<HddsProtos.DatanodeUsageInfoProto> getUsageProto() {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]