This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 0bb733e4ee HDDS-9978. Improve assertTrue assertions in OM integration tests (#5850)
0bb733e4ee is described below
commit 0bb733e4eeed26f938b17a79e49a7a0b0cc2bed2
Author: Zhaohui Wang <[email protected]>
AuthorDate: Sat Dec 23 00:36:21 2023 +0800
HDDS-9978. Improve assertTrue assertions in OM integration tests (#5850)
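    The patch replaces bare JUnit assertTrue checks with equivalent AssertJ
    assertThat chains so that failures report the inspected value instead of
    just "expected: <true> but was: <false>". A minimal sketch of the
    conversion pattern used throughout the patch (illustrative snippet, not
    taken verbatim from the changed files):

        import static org.assertj.core.api.Assertions.assertThat;
        import static org.junit.jupiter.api.Assertions.assertTrue;

        // Before: a failed containment or size check only reports the boolean.
        assertTrue(omLog.getOutput().contains("Remote OM config check failed"));
        assertTrue(logFiles.length > 0, "There are no ratis logs in new OM");

        // After: assertThat reports the actual value on failure, and
        // withFailMessage() carries over a custom message where one existed.
        assertThat(omLog.getOutput()).contains("Remote OM config check failed");
        assertThat(logFiles.length)
            .withFailMessage("There are no ratis logs in new OM")
            .isGreaterThan(0);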
---
.../hadoop/ozone/om/TestAddRemoveOzoneManager.java | 45 +++++++++++----------
.../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 23 ++++++-----
.../org/apache/hadoop/ozone/om/TestKeyPurging.java | 4 +-
.../org/apache/hadoop/ozone/om/TestListStatus.java | 4 +-
.../hadoop/ozone/om/TestOMDbCheckpointServlet.java | 33 ++++++++-------
.../hadoop/ozone/om/TestOMRatisSnapshots.java | 47 ++++++++++------------
.../org/apache/hadoop/ozone/om/TestOmAcls.java | 30 +++++++-------
.../org/apache/hadoop/ozone/om/TestOmMetrics.java | 5 +--
.../ozone/om/TestOmSnapshotDisabledRestart.java | 4 +-
.../hadoop/ozone/om/TestOmSnapshotFileSystem.java | 11 ++---
.../om/TestOzoneManagerHAWithStoppedNodes.java | 9 ++---
.../hadoop/ozone/om/TestOzoneManagerPrepare.java | 8 ++--
.../ozone/om/TestOzoneManagerRocksDBLogging.java | 4 +-
.../apache/hadoop/ozone/om/TestScmSafeMode.java | 13 +++---
.../om/multitenant/TestMultiTenantVolume.java | 7 ++--
.../ozone/om/service/TestRangerBGSyncService.java | 22 +++++-----
.../hadoop/ozone/om/snapshot/TestOmSnapshot.java | 9 +++--
17 files changed, 140 insertions(+), 138 deletions(-)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java
index d438ad09fc..632974475a 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java
@@ -57,6 +57,7 @@ import org.slf4j.event.Level;
import static org.apache.hadoop.ozone.OzoneConsts.SCM_DUMMY_SERVICE_ID;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_RATIS_SERVER_REQUEST_TIMEOUT_DEFAULT;
import static org.apache.hadoop.ozone.om.TestOzoneManagerHA.createKey;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -131,10 +132,9 @@ public class TestAddRemoveOzoneManager {
+ " not present in Peer list of OM " + om.getOMNodeId());
assertTrue(om.getOmRatisServer().doesPeerExist(nodeId), "New OM node " + nodeId
+ " not present in Peer list of OM " + om.getOMNodeId() + " RatisServer");
- assertTrue(
- om.getOmRatisServer().getCurrentPeersFromRaftConf().contains(nodeId),
- "New OM node " + nodeId + " not present in " + "OM "
- + om.getOMNodeId() + "RatisServer's RaftConf");
+ assertThat(om.getOmRatisServer().getCurrentPeersFromRaftConf())
+ .withFailMessage("New OM node " + nodeId + " not present in " +
om.getOMNodeId() + "'s RaftConf")
+ .contains(nodeId);
}
OzoneManager newOM = cluster.getOzoneManager(nodeId);
@@ -144,7 +144,8 @@ public class TestAddRemoveOzoneManager {
// Check Ratis Dir for log files
File[] logFiles = getRatisLogFiles(newOM);
- assertTrue(logFiles.length > 0, "There are no ratis logs in new OM ");
+ assertThat(logFiles.length).withFailMessage("There are no ratis logs in new OM ")
+ .isGreaterThan(0);
}
private File[] getRatisLogFiles(OzoneManager om) {
@@ -197,9 +198,9 @@ public class TestAddRemoveOzoneManager {
GenericTestUtils.waitFor(() -> cluster.getOMLeader() != null, 500, 30000);
OzoneManager omLeader = cluster.getOMLeader();
- assertTrue(newOMNodeIds.contains(omLeader.getOMNodeId()),
- "New Bootstrapped OM not elected Leader even though" +
- " other OMs are down");
+ assertThat(newOMNodeIds)
+ .withFailMessage("New Bootstrapped OM not elected Leader even though"
+ " other OMs are down")
+ .contains(omLeader.getOMNodeId());
// Perform some read and write operations with new OM leader
IOUtils.closeQuietly(client);
@@ -247,10 +248,10 @@ public class TestAddRemoveOzoneManager {
Lists.newArrayList(existingOM.getNodeDetails())) + " do not have or" +
" have incorrect information of the bootstrapping OM. Update their " +
"ozone-site.xml before proceeding.", e.getMessage());
- assertTrue(omLog.getOutput().contains("Remote OM config check " +
- "failed on OM " + existingOMNodeId));
- assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId +
- " - System Exit"));
+ assertThat(omLog.getOutput()).contains("Remote OM config check " +
+ "failed on OM " + existingOMNodeId);
+ assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId +
+ " - System Exit");
}
/***************************************************************************
@@ -268,11 +269,11 @@ public class TestAddRemoveOzoneManager {
try {
cluster.bootstrapOzoneManager(newNodeId, false, true);
} catch (IOException e) {
- assertTrue(omLog.getOutput().contains("Couldn't add OM " +
- newNodeId + " to peer list."));
- assertTrue(miniOzoneClusterLog.getOutput().contains(
+ assertThat(omLog.getOutput()).contains("Couldn't add OM " +
+ newNodeId + " to peer list.");
+ assertThat(miniOzoneClusterLog.getOutput()).contains(
existingOMNodeId + " - System Exit: There is no OM configuration " +
- "for node ID " + newNodeId + " in ozone-site.xml."));
+ "for node ID " + newNodeId + " in ozone-site.xml.");
// Verify that the existing OM has stopped.
assertFalse(cluster.getOzoneManager(existingOMNodeId).isRunning());
@@ -321,12 +322,12 @@ public class TestAddRemoveOzoneManager {
Lists.newArrayList(downOM.getNodeDetails())) + " do not have or " +
"have incorrect information of the bootstrapping OM. Update their " +
"ozone-site.xml before proceeding.", e.getMessage());
- assertTrue(omLog.getOutput().contains("Remote OM " + downOMNodeId +
- " configuration returned null"));
- assertTrue(omLog.getOutput().contains("Remote OM config check " +
- "failed on OM " + downOMNodeId));
- assertTrue(miniOzoneClusterLog.getOutput().contains(newNodeId +
- " - System Exit"));
+ assertThat(omLog.getOutput()).contains("Remote OM " + downOMNodeId +
+ " configuration returned null");
+ assertThat(omLog.getOutput()).contains("Remote OM config check " +
+ "failed on OM " + downOMNodeId);
+ assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId +
+ " - System Exit");
}
/***************************************************************************
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index b878f92019..1521b4c614 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -127,6 +127,7 @@ import org.junit.jupiter.params.provider.MethodSource;
import org.mockito.Mockito;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -317,8 +318,8 @@ public class TestKeyManagerImpl {
OMException omException = assertThrows(OMException.class,
() -> writeClient.allocateBlock(keyArgs, 1L, new ExcludeList()));
- assertTrue(omException.getMessage()
- .contains("SafeModePrecheck failed for allocateBlock"));
+ assertThat(omException.getMessage())
+ .contains("SafeModePrecheck failed for allocateBlock");
}
@Test
@@ -334,8 +335,8 @@ public class TestKeyManagerImpl {
.build();
OMException omException = assertThrows(OMException.class,
() -> writeClient.openKey(keyArgs));
- assertTrue(omException.getMessage()
- .contains("SafeModePrecheck failed for allocateBlock"));
+ assertThat(omException.getMessage())
+ .contains("SafeModePrecheck failed for allocateBlock");
}
@Test
@@ -847,9 +848,9 @@ public class TestKeyManagerImpl {
// lookup key, random node as client
OmKeyInfo key4 = keyManager.lookupKey(keyArgs, resolvedBucket(),
"/d=default-drack/127.0.0.1");
- assertTrue(
- keyPipeline.getNodes().containsAll(key4.getLatestVersionLocations()
- .getLocationList().get(0).getPipeline().getNodesInOrder()));
+ assertThat(keyPipeline.getNodes())
+ .containsAll(key4.getLatestVersionLocations()
+ .getLocationList().get(0).getPipeline().getNodesInOrder());
}
@NotNull
@@ -1592,11 +1593,11 @@ public class TestKeyManagerImpl {
}
// verify filestatus is present in directory or file set accordingly
if (fileStatus.isDirectory()) {
- assertTrue(directorySet.contains(normalizedKeyName),
- directorySet + " doesn't contain " + normalizedKeyName);
+ assertThat(directorySet).withFailMessage(directorySet +
+ " doesn't contain " +
normalizedKeyName).contains(normalizedKeyName);
} else {
- assertTrue(fileSet.contains(normalizedKeyName),
- fileSet + " doesn't contain " + normalizedKeyName);
+ assertThat(fileSet).withFailMessage(fileSet + " doesn't contain " + normalizedKeyName)
+ .contains(normalizedKeyName);
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
index 066ff6e1db..83eac0ab28 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyPurging.java
@@ -45,7 +45,7 @@ import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
-import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.assertj.core.api.Assertions.assertThat;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_CONTAINER_REPORT_INTERVAL;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
@@ -134,7 +134,7 @@ public class TestKeyPurging {
() -> keyDeletingService.getDeletedKeyCount().get() >= NUM_KEYS,
1000, 10000);
- assertTrue(keyDeletingService.getRunCount().get() > 1);
+ assertThat(keyDeletingService.getRunCount().get()).isGreaterThan(1);
GenericTestUtils.waitFor(
() -> {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java
index 13e4440236..a24e78617f 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestListStatus.java
@@ -35,8 +35,8 @@ import org.junit.jupiter.api.Timeout;
import java.io.IOException;
import java.util.UUID;
import java.util.List;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.apache.hadoop.ozone.OzoneConfigKeys.
OZONE_FS_ITERATE_BATCH_SIZE;
@@ -198,7 +198,7 @@ public class TestListStatus {
OzoneFileStatus stNext = statuses.get(i + 1);
System.out.println("status:" + stCurr);
- assertTrue(stCurr.getPath().compareTo(stNext.getPath()) < 0);
+ assertThat(stCurr.getPath().compareTo(stNext.getPath())).isLessThan(0);
}
if (!statuses.isEmpty()) {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
index a835944eef..fba6a72363 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMDbCheckpointServlet.java
@@ -105,6 +105,7 @@ import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.DATA_SUFFIX;
import static org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils.truncateFileName;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COMPACTION_LOG_FILE_NAME_SUFFIX;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.params.provider.Arguments.arguments;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyBoolean;
@@ -275,15 +276,13 @@ public class TestOMDbCheckpointServlet {
doEndpoint();
- Assertions.assertTrue(tempFile.length() > 0);
- Assertions.assertTrue(
- omMetrics.getDBCheckpointMetrics().
- getLastCheckpointCreationTimeTaken() > 0);
- Assertions.assertTrue(
- omMetrics.getDBCheckpointMetrics().
- getLastCheckpointStreamingTimeTaken() > 0);
- Assertions.assertTrue(omMetrics.getDBCheckpointMetrics().
- getNumCheckpoints() > initialCheckpointCount);
+ assertThat(tempFile.length()).isGreaterThan(0);
+ assertThat(omMetrics.getDBCheckpointMetrics().getLastCheckpointCreationTimeTaken())
+ .isGreaterThan(0);
+ assertThat(omMetrics.getDBCheckpointMetrics().getLastCheckpointStreamingTimeTaken())
+ .isGreaterThan(0);
+ assertThat(omMetrics.getDBCheckpointMetrics().getNumCheckpoints())
+ .isGreaterThan(initialCheckpointCount);
Mockito.verify(omDbCheckpointServletMock).writeDbDataToStream(any(),
any(), any(), eq(toExcludeList), any(), any());
@@ -383,7 +382,7 @@ public class TestOMDbCheckpointServlet {
// Recon user should be able to access the servlet and download the
// snapshot
- Assertions.assertTrue(tempFile.length() > 0);
+ assertThat(tempFile.length()).isGreaterThan(0);
}
@Test
@@ -483,8 +482,8 @@ public class TestOMDbCheckpointServlet {
Set<String> finalCheckpointSet = getFiles(finalCheckpointLocation,
newDbDirLength);
- Assertions.assertTrue(finalCheckpointSet.contains(OM_HARDLINK_FILE),
- "hardlink file exists in checkpoint dir");
+ assertThat(finalCheckpointSet).withFailMessage("hardlink file exists in checkpoint dir")
+ .contains(OM_HARDLINK_FILE);
finalCheckpointSet.remove(OM_HARDLINK_FILE);
Assertions.assertEquals(initialCheckpointSet, finalCheckpointSet);
@@ -522,10 +521,10 @@ public class TestOMDbCheckpointServlet {
Set<String> initialFullSet =
getFiles(Paths.get(metaDir.toString(), OM_SNAPSHOT_DIR),
metaDirLength);
- Assertions.assertTrue(finalFullSet.contains(expectedLogStr));
- Assertions.assertTrue(finalFullSet.contains(expectedSstStr));
- Assertions.assertTrue(initialFullSet.contains(unExpectedLogStr));
- Assertions.assertTrue(initialFullSet.contains(unExpectedSstStr));
+ assertThat(finalFullSet).contains(expectedLogStr);
+ assertThat(finalFullSet).contains(expectedSstStr);
+ assertThat(initialFullSet).contains(unExpectedLogStr);
+ assertThat(initialFullSet).contains(unExpectedSstStr);
// Remove the dummy files that should not have been copied over
// from the expected data.
@@ -628,7 +627,7 @@ public class TestOMDbCheckpointServlet {
testDirLength);
initialCheckpointSet.removeAll(finalCheckpointSet);
- Assertions.assertTrue(initialCheckpointSet.contains(dummyFile.getName()));
+ assertThat(initialCheckpointSet).contains(dummyFile.getName());
}
/**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
index 093f1107b5..920c182c58 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOMRatisSnapshots.java
@@ -94,6 +94,7 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_SNAPSHOT_SST_FILTERI
import static org.apache.hadoop.ozone.om.OmSnapshotManager.OM_HARDLINK_FILE;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.apache.hadoop.ozone.om.TestOzoneManagerHAWithStoppedNodes.createKey;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
@@ -262,17 +263,16 @@ public class TestOMRatisSnapshots {
long followerOMLastAppliedIndex =
followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex();
- assertTrue(
- followerOMLastAppliedIndex >= leaderOMSnapshotIndex - 1);
+ assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex - 1);
// After the new checkpoint is installed, the follower OM
// lastAppliedIndex must >= the snapshot index of the checkpoint. It
// could be great than snapshot index if there is any conf entry from ratis.
followerOMLastAppliedIndex = followerOM.getOmRatisServer()
.getLastAppliedTermIndex().getIndex();
- assertTrue(followerOMLastAppliedIndex >= leaderOMSnapshotIndex);
- assertTrue(followerOM.getOmRatisServer().getLastAppliedTermIndex()
- .getTerm() >= leaderOMSnapshotTermIndex);
+ assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex);
+ assertThat(followerOM.getOmRatisServer().getLastAppliedTermIndex()
+ .getTerm()).isGreaterThanOrEqualTo(leaderOMSnapshotTermIndex);
// Verify checkpoint installation was happened.
String msg = "Reloaded OM state";
@@ -317,7 +317,7 @@ public class TestOMRatisSnapshots {
sstFileUnion.addAll(sstFiles);
}
// Confirm that there were multiple tarballs.
- assertTrue(sstSetList.size() > 1);
+ assertThat(sstSetList.size()).isGreaterThan(1);
// Confirm that there was no overlap of sst files
// between the individual tarballs.
assertEquals(sstFileUnion.size(), sstFileCount);
@@ -378,7 +378,8 @@ public class TestOMRatisSnapshots {
}
}
}
- Assertions.assertTrue(hardLinkCount > 0, "No hard links were found");
+ assertThat(hardLinkCount).withFailMessage("No hard links were found")
+ .isGreaterThan(0);
}
@Test
@@ -471,8 +472,7 @@ public class TestOMRatisSnapshots {
// Verify the metrics recording the incremental checkpoint at leader side
DBCheckpointMetrics dbMetrics = leaderOM.getMetrics().
getDBCheckpointMetrics();
- Assertions.assertTrue(
- dbMetrics.getLastCheckpointStreamingNumSSTExcluded() > 0);
+ assertThat(dbMetrics.getLastCheckpointStreamingNumSSTExcluded()).isGreaterThan(0);
assertEquals(2, dbMetrics.getNumIncrementalCheckpoints());
// Verify RPC server is running
@@ -552,9 +552,8 @@ public class TestOMRatisSnapshots {
followerOM.getOmSnapshotProvider().getNumDownloaded() ==
expectedNumDownloads, 1000, 30_000);
- assertTrue(followerOM.getOmRatisServer().
- getLastAppliedTermIndex().getIndex()
- >= leaderOMSnapshotIndex - 1);
+ assertThat(followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex())
+ .isGreaterThanOrEqualTo(leaderOMSnapshotIndex - 1);
// Now confirm tarball is just incremental and contains no unexpected
// files/links.
@@ -567,7 +566,7 @@ public class TestOMRatisSnapshots {
// Confirm that none of the files in the tarball match one in the
// candidate dir.
- assertTrue(sstFiles.size() > 0);
+ assertThat(sstFiles.size()).isGreaterThan(0);
for (String s: sstFiles) {
File sstFile = Paths.get(followerCandidatePath.toString(), s).toFile();
assertFalse(sstFile.exists(),
@@ -588,7 +587,7 @@ public class TestOMRatisSnapshots {
"Incremental checkpoint should not " +
"duplicate existing links");
}
- assertTrue(lineCount > 0);
+ assertThat(lineCount).isGreaterThan(0);
}
return id;
}
@@ -648,7 +647,7 @@ public class TestOMRatisSnapshots {
File followerCandidateDir = followerOM.getOmSnapshotProvider().
getCandidateDir();
List<String> sstList = HAUtils.getExistingSstFiles(followerCandidateDir);
- Assertions.assertTrue(sstList.size() > 0);
+ assertThat(sstList.size()).isGreaterThan(0);
Collections.shuffle(sstList);
List<String> victimSstList = sstList.subList(0, sstList.size() / 3);
for (String sst: victimSstList) {
@@ -799,17 +798,16 @@ public class TestOMRatisSnapshots {
long followerOMLastAppliedIndex =
followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex();
- assertTrue(
- followerOMLastAppliedIndex >= leaderOMSnapshotIndex - 1);
+ assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex - 1);
// After the new checkpoint is installed, the follower OM
// lastAppliedIndex must >= the snapshot index of the checkpoint. It
// could be great than snapshot index if there is any conf entry from ratis.
followerOMLastAppliedIndex = followerOM.getOmRatisServer()
.getLastAppliedTermIndex().getIndex();
- assertTrue(followerOMLastAppliedIndex >= leaderOMSnapshotIndex);
- assertTrue(followerOM.getOmRatisServer().getLastAppliedTermIndex()
- .getTerm() >= leaderOMSnapshotTermIndex);
+ assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex);
+ assertThat(followerOM.getOmRatisServer().getLastAppliedTermIndex()
+ .getTerm()).isGreaterThanOrEqualTo(leaderOMSnapshotTermIndex);
// Verify that the follower OM's DB contains the transactions which were
// made while it was inactive.
@@ -898,17 +896,16 @@ public class TestOMRatisSnapshots {
long followerOMLastAppliedIndex =
followerOM.getOmRatisServer().getLastAppliedTermIndex().getIndex();
- assertTrue(
- followerOMLastAppliedIndex >= leaderOMSnapshotIndex - 1);
+ assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex - 1);
// After the new checkpoint is installed, the follower OM
// lastAppliedIndex must >= the snapshot index of the checkpoint. It
// could be great than snapshot index if there is any conf entry from ratis.
followerOMLastAppliedIndex = followerOM.getOmRatisServer()
.getLastAppliedTermIndex().getIndex();
- assertTrue(followerOMLastAppliedIndex >= leaderOMSnapshotIndex);
- assertTrue(followerOM.getOmRatisServer().getLastAppliedTermIndex()
- .getTerm() >= leaderOMSnapshotTermIndex);
+ assertThat(followerOMLastAppliedIndex).isGreaterThanOrEqualTo(leaderOMSnapshotIndex);
+ assertThat(followerOM.getOmRatisServer().getLastAppliedTermIndex()
+ .getTerm()).isGreaterThanOrEqualTo(leaderOMSnapshotTermIndex);
// Verify that the follower OM's DB contains the transactions which were
// made while it was inactive.
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
index d0d2bf40ef..02ad087965 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmAcls.java
@@ -48,9 +48,9 @@ import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ADMINISTRATORS_WILDCARD;
import static org.apache.hadoop.ozone.audit.AuditLogTestUtils.verifyAuditLog;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test for Ozone Manager ACLs.
@@ -124,8 +124,8 @@ public class TestOmAcls {
() -> TestDataUtil.createVolumeAndBucket(client));
assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult());
- assertTrue(logCapturer.getOutput()
- .contains("doesn't have CREATE permission to access volume"));
+ assertThat(logCapturer.getOutput())
+ .contains("doesn't have CREATE permission to access volume");
verifyAuditLog(OMAction.CREATE_VOLUME, AuditEventStatus.FAILURE);
}
@@ -138,8 +138,8 @@ public class TestOmAcls {
objectStore.getVolume(bucket.getVolumeName()));
assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult());
- assertTrue(logCapturer.getOutput()
- .contains("doesn't have READ permission to access volume"));
+ assertThat(logCapturer.getOutput())
+ .contains("doesn't have READ permission to access volume");
verifyAuditLog(OMAction.READ_VOLUME, AuditEventStatus.FAILURE);
}
@@ -151,8 +151,8 @@ public class TestOmAcls {
() -> TestDataUtil.createVolumeAndBucket(client));
assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult());
- assertTrue(logCapturer.getOutput()
- .contains("doesn't have CREATE permission to access bucket"));
+ assertThat(logCapturer.getOutput())
+ .contains("doesn't have CREATE permission to access bucket");
verifyAuditLog(OMAction.CREATE_BUCKET, AuditEventStatus.FAILURE);
}
@@ -167,8 +167,8 @@ public class TestOmAcls {
);
assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult());
- assertTrue(logCapturer.getOutput()
- .contains("doesn't have READ permission to access bucket"));
+ assertThat(logCapturer.getOutput())
+ .contains("doesn't have READ permission to access bucket");
verifyAuditLog(OMAction.READ_BUCKET, AuditEventStatus.FAILURE);
}
@@ -181,8 +181,8 @@ public class TestOmAcls {
OMException exception = assertThrows(OMException.class,
() -> TestDataUtil.createKey(bucket, "testKey", "testcontent"));
assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult());
- assertTrue(logCapturer.getOutput().contains("doesn't have CREATE " +
- "permission to access key"));
+ assertThat(logCapturer.getOutput()).contains("doesn't have CREATE " +
+ "permission to access key");
}
@Test
@@ -195,8 +195,8 @@ public class TestOmAcls {
() -> TestDataUtil.getKey(bucket, "testKey"));
assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult());
- assertTrue(logCapturer.getOutput().contains("doesn't have READ " +
- "permission to access key"));
+ assertThat(logCapturer.getOutput()).contains("doesn't have READ " +
+ "permission to access key");
verifyAuditLog(OMAction.READ_KEY, AuditEventStatus.FAILURE);
}
@@ -209,8 +209,8 @@ public class TestOmAcls {
OMException exception = assertThrows(OMException.class,
() -> bucket.setAcl(new ArrayList<>()));
assertEquals(ResultCodes.PERMISSION_DENIED, exception.getResult());
- assertTrue(logCapturer.getOutput()
- .contains("doesn't have WRITE_ACL permission to access bucket"));
+ assertThat(logCapturer.getOutput())
+ .contains("doesn't have WRITE_ACL permission to access bucket");
verifyAuditLog(OMAction.SET_ACL, AuditEventStatus.FAILURE);
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
index d49f059a06..8ab1cf54a5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java
@@ -22,9 +22,9 @@ import static org.apache.hadoop.ozone.security.acl.OzoneObj.ResourceType.VOLUME;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.ArgumentMatchers.eq;
@@ -324,8 +324,7 @@ public class TestOmMetrics {
writeClient.commitKey(keyArgs, keySession.getId());
} catch (Exception e) {
//Expected Failure in preExecute due to not enough datanode
- assertTrue(e.getMessage().contains("No enough datanodes to choose"),
- e::getMessage);
+ assertThat(e.getMessage()).contains("No enough datanodes to choose");
}
omMetrics = getMetrics("OMMetrics");
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java
index fe407bf66f..613ebc58b5 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotDisabledRestart.java
@@ -34,6 +34,8 @@ import org.junit.jupiter.api.Timeout;
import java.util.UUID;
+import static org.assertj.core.api.Assertions.assertThat;
+
/**
* Integration test to verify that if snapshot feature is disabled, OM start up
* will fail when there are still snapshots remaining.
@@ -102,7 +104,7 @@ public class TestOmSnapshotDisabledRestart {
// Restart OM, expect OM start up failure
RuntimeException rte = Assertions.assertThrows(RuntimeException.class,
() -> cluster.restartOzoneManager(om, true));
- Assertions.assertTrue(rte.getMessage().contains("snapshots remaining"));
+ assertThat(rte.getMessage()).contains("snapshots remaining");
// Enable snapshot feature again
om.getConfiguration().setBoolean(
OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY, true);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java
index 7c7205b20b..790399c323 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotFileSystem.java
@@ -82,6 +82,7 @@ import static org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
import static org.apache.hadoop.ozone.om.OmSnapshotManager.getSnapshotPath;
import static org.apache.hadoop.ozone.om.helpers.BucketLayout.FILE_SYSTEM_OPTIMIZED;
import static org.apache.hadoop.ozone.om.helpers.BucketLayout.LEGACY;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -424,10 +425,10 @@ public abstract class TestOmSnapshotFileSystem {
final String errorMsg1 = "no longer active";
FileNotFoundException exception = assertThrows(FileNotFoundException.class,
() -> o3fs.listStatus(snapshotRoot));
- assertTrue(exception.getMessage().contains(errorMsg1));
+ assertThat(exception.getMessage()).contains(errorMsg1);
exception = assertThrows(FileNotFoundException.class,
() -> o3fs.listStatus(snapshotParent));
- assertTrue(exception.getMessage().contains(errorMsg1));
+ assertThat(exception.getMessage()).contains(errorMsg1);
// Note: Different error message due to inconsistent FNFE client-side
// handling in BasicOzoneClientAdapterImpl#getFileStatus
@@ -435,10 +436,10 @@ public abstract class TestOmSnapshotFileSystem {
final String errorMsg2 = "No such file or directory";
exception = assertThrows(FileNotFoundException.class,
() -> o3fs.getFileStatus(snapshotKey1));
- assertTrue(exception.getMessage().contains(errorMsg2));
+ assertThat(exception.getMessage()).contains(errorMsg2);
exception = assertThrows(FileNotFoundException.class,
() -> o3fs.getFileStatus(snapshotKey2));
- assertTrue(exception.getMessage().contains(errorMsg2));
+ assertThat(exception.getMessage()).contains(errorMsg2);
}
@Test
@@ -718,7 +719,7 @@ public abstract class TestOmSnapshotFileSystem {
"Total directories listed do not match the existing directories");
for (int i = 0; i < numDirs; i++) {
- assertTrue(paths.contains(fileStatuses[i].getPath().getName()));
+ assertThat(paths).contains(fileStatuses[i].getPath().getName());
}
deleteSnapshot(snapshotName);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java
index 1a65d5d065..3b90b1b74c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerHAWithStoppedNodes.java
@@ -63,10 +63,10 @@ import java.util.UUID;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.ozone.MiniOzoneHAClusterImpl.NODE_FAILURE_TIMEOUT;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_CLIENT_WAIT_BETWEEN_RETRIES_MILLIS_DEFAULT;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Ozone Manager HA tests that stop/restart one or more OM nodes.
@@ -298,7 +298,7 @@ public class TestOzoneManagerHAWithStoppedNodes extends TestOzoneManagerHA {
final long leaderOMSnaphsotIndex = leaderOM.getRatisSnapshotIndex();
// The stopped OM should be lagging behind the leader OM.
- assertTrue(followerOM1LastAppliedIndex < leaderOMSnaphsotIndex);
+ assertThat(followerOM1LastAppliedIndex).isLessThan(leaderOMSnaphsotIndex);
// Restart the stopped OM.
followerOM1.restart();
@@ -317,8 +317,7 @@ public class TestOzoneManagerHAWithStoppedNodes extends TestOzoneManagerHA {
final long followerOM1LastAppliedIndexNew =
followerOM1.getOmRatisServer().getLastAppliedTermIndex().getIndex();
- assertTrue(
- followerOM1LastAppliedIndexNew > leaderOMSnaphsotIndex);
+ assertThat(followerOM1LastAppliedIndexNew).isGreaterThan(leaderOMSnaphsotIndex);
}
@Test
@@ -590,7 +589,7 @@ public class TestOzoneManagerHAWithStoppedNodes extends TestOzoneManagerHA {
while (volumeIterator.hasNext()) {
OzoneVolume next = volumeIterator.next();
- assertTrue(expectedVolumes.contains(next.getName()));
+ assertThat(expectedVolumes).contains(next.getName());
expectedCount++;
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
index fbe2aa7adf..4279055129 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerPrepare.java
@@ -23,6 +23,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import java.io.File;
import java.io.IOException;
@@ -295,11 +296,8 @@ public class TestOzoneManagerPrepare extends TestOzoneManagerHA {
.stream()
.anyMatch((vol) -> vol.getName().equals(volumeName)));
} catch (ExecutionException ex) {
- Throwable cause = ex.getCause();
- assertTrue(cause instanceof OMException);
- assertEquals(
- NOT_SUPPORTED_OPERATION_WHEN_PREPARED,
- ((OMException) cause).getResult());
+ OMException cause = assertInstanceOf(OMException.class, ex.getCause());
+ assertEquals(NOT_SUPPORTED_OPERATION_WHEN_PREPARED, cause.getResult());
}
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java
index 90eec44292..ce6eb11998 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerRocksDBLogging.java
@@ -35,6 +35,8 @@ import org.junit.rules.TestRule;
import org.junit.rules.Timeout;
import org.apache.ozone.test.JUnit5AwareTimeout;
+import static org.assertj.core.api.Assertions.assertThat;
+
/**
* Test RocksDB logging for Ozone Manager.
*/
@@ -75,7 +77,7 @@ public class TestOzoneManagerRocksDBLogging {
waitForRocksDbLog();
Assert.fail("Unexpected RocksDB log: " + logCapturer.getOutput());
} catch (TimeoutException ex) {
- Assert.assertTrue(ex.getMessage().contains("Timed out"));
+ assertThat(ex.getMessage()).contains("Timed out");
}
enableRocksDbLogging(true);
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
index d682c7f8f3..610b9c693c 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestScmSafeMode.java
@@ -63,6 +63,7 @@ import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_DEADNODE_INTERVAL;
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -173,8 +174,8 @@ public class TestScmSafeMode {
IOException ioException = assertThrows(IOException.class,
() -> bucket1.createKey(keyName, 1000, RATIS, ONE,
new HashMap<>()));
- assertTrue(ioException.getMessage()
- .contains("SafeModePrecheck failed for allocateBlock"));
+ assertThat(ioException.getMessage())
+ .contains("SafeModePrecheck failed for allocateBlock");
}
/**
@@ -286,8 +287,8 @@ public class TestScmSafeMode {
double safeModeCutoff = conf
.getDouble(HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT,
HddsConfigKeys.HDDS_SCM_SAFEMODE_THRESHOLD_PCT_DEFAULT);
- assertTrue(scm.getCurrentContainerThreshold() >= safeModeCutoff);
- assertTrue(logCapturer.getOutput().contains("SCM exiting safe mode."));
+ assertThat(scm.getCurrentContainerThreshold()).isGreaterThanOrEqualTo(safeModeCutoff);
+ assertThat(logCapturer.getOutput()).contains("SCM exiting safe mode.");
assertFalse(scm.isInSafeMode());
}
@@ -302,8 +303,8 @@ public class TestScmSafeMode {
() -> scm.getClientProtocolServer()
.allocateContainer(ReplicationType.STAND_ALONE,
ReplicationFactor.ONE, ""));
- assertTrue(scmException.getMessage()
- .contains("SafeModePrecheck failed for allocateContainer"));
+ assertThat(scmException.getMessage())
+ .contains("SafeModePrecheck failed for allocateContainer");
cluster.startHddsDatanodes();
cluster.waitForClusterToBeReady();
cluster.waitTobeOutOfSafeMode();
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java
index e311bc0b5e..1cb436dcb3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantVolume.java
@@ -49,6 +49,7 @@ import java.util.concurrent.TimeoutException;
import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isDone;
import static org.apache.hadoop.ozone.admin.scm.FinalizeUpgradeCommandUtil.isStarting;
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MULTITENANCY_ENABLED;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;
@@ -94,8 +95,8 @@ public class TestMultiTenantVolume {
private static void expectFailurePreFinalization(VoidCallable eval) {
OMException omException = assertThrows(OMException.class, eval::call);
- assertTrue(omException.getMessage()
- .contains("cannot be invoked before finalization"));
+ assertThat(omException.getMessage())
+ .contains("cannot be invoked before finalization");
}
/**
@@ -310,7 +311,7 @@ public class TestMultiTenantVolume {
OMException.class,
() -> store.createTenant(tenantId));
- assertTrue(e.getMessage().contains("Invalid volume name: " + tenantId));
+ assertThat(e.getMessage()).contains("Invalid volume name: " + tenantId);
}
}
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
index 08358054fc..c01d0c9044 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
@@ -76,9 +76,9 @@ import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_HTTPS_ADDRESS
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_RANGER_SERVICE;
import static org.apache.hadoop.ozone.om.OMMultiTenantManager.OZONE_TENANT_RANGER_ROLE_DESCRIPTION;
import static org.apache.hadoop.security.authentication.util.KerberosName.DEFAULT_MECHANISM;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.doAnswer;
@@ -467,7 +467,7 @@ public class TestRangerBGSyncService {
final long rangerSvcVersionBefore =
bgSync.getRangerOzoneServicePolicyVersion();
- assertTrue(rangerSvcVersionBefore >= startingRangerVersion);
+ assertThat(rangerSvcVersionBefore).isGreaterThanOrEqualTo(startingRangerVersion);
// Note: DB Service Version will be -1 if the test starts with an empty DB
final long dbSvcVersionBefore = bgSync.getOMDBRangerServiceVersion();
@@ -481,8 +481,8 @@ public class TestRangerBGSyncService {
final long rangerSvcVersionAfter =
bgSync.getRangerOzoneServicePolicyVersion();
assertEquals(rangerSvcVersionAfter, dbSvcVersionAfter);
- assertTrue(dbSvcVersionAfter > dbSvcVersionBefore);
- assertTrue(rangerSvcVersionAfter > rangerSvcVersionBefore);
+ assertThat(dbSvcVersionAfter).isGreaterThan(dbSvcVersionBefore);
+ assertThat(rangerSvcVersionAfter).isGreaterThan(rangerSvcVersionBefore);
// Verify that the Ranger policies and roles not backed up
// by OzoneManager Multi-Tenancy tables are cleaned up by sync thread
@@ -530,7 +530,7 @@ public class TestRangerBGSyncService {
createRolesAndPoliciesInRanger(true);
long rangerSvcVersionBefore = bgSync.getRangerOzoneServicePolicyVersion();
- assertTrue(rangerSvcVersionBefore >= startingRangerVersion);
+ assertThat(rangerSvcVersionBefore).isGreaterThanOrEqualTo(startingRangerVersion);
// Note: DB Service Version will be -1 if the test starts with an empty DB
final long dbSvcVersionBefore = bgSync.getOMDBRangerServiceVersion();
@@ -583,7 +583,7 @@ public class TestRangerBGSyncService {
long rangerVersionAfterCreation =
bgSync.getRangerOzoneServicePolicyVersion();
- assertTrue(rangerVersionAfterCreation >= startingRangerVersion);
+ assertThat(rangerVersionAfterCreation).isGreaterThanOrEqualTo(startingRangerVersion);
// Delete user bob from user role, expect Ranger sync thread to update it
String userRoleName = rolesCreated.get(0);
@@ -617,8 +617,8 @@ public class TestRangerBGSyncService {
final long rangerSvcVersionAfter =
bgSync.getRangerOzoneServicePolicyVersion();
assertEquals(rangerSvcVersionAfter, dbSvcVersionAfter);
- assertTrue(dbSvcVersionAfter > dbSvcVersionBefore);
- assertTrue(rangerSvcVersionAfter > rangerSvcVersionBefore);
+ assertThat(dbSvcVersionAfter).isGreaterThan(dbSvcVersionBefore);
+ assertThat(rangerSvcVersionAfter).isGreaterThan(rangerSvcVersionBefore);
for (String policyName : policiesCreated) {
final Policy policy = accessController.getPolicy(policyName);
@@ -651,7 +651,7 @@ public class TestRangerBGSyncService {
long rangerVersionAfterCreation =
bgSync.getRangerOzoneServicePolicyVersion();
- assertTrue(rangerVersionAfterCreation >= startingRangerVersion);
+ assertThat(rangerVersionAfterCreation).isGreaterThanOrEqualTo(startingRangerVersion);
// Delete both policies, expect Ranger sync thread to recover both
accessController.deletePolicy(
@@ -673,8 +673,8 @@ public class TestRangerBGSyncService {
final long rangerSvcVersionAfter =
bgSync.getRangerOzoneServicePolicyVersion();
assertEquals(rangerSvcVersionAfter, dbSvcVersionAfter);
- assertTrue(dbSvcVersionAfter > dbSvcVersionBefore);
- assertTrue(rangerSvcVersionAfter > rangerSvcVersionBefore);
+ assertThat(dbSvcVersionAfter).isGreaterThan(dbSvcVersionBefore);
+ assertThat(rangerSvcVersionAfter).isGreaterThan(rangerSvcVersionBefore);
for (String policyName : policiesCreated) {
try {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
index 7e9fe787df..dcd206f07b 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/snapshot/TestOmSnapshot.java
@@ -134,6 +134,7 @@ import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.CA
import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.DONE;
import static org.apache.hadoop.ozone.snapshot.SnapshotDiffResponse.JobStatus.IN_PROGRESS;
import static org.apache.ozone.rocksdiff.RocksDBCheckpointDiffer.COLUMN_FAMILIES_TO_TRACK_IN_DAG;
+import static org.assertj.core.api.Assertions.assertThat;
import static org.awaitility.Awaitility.with;
import static org.awaitility.Awaitility.await;
import static org.hamcrest.MatcherAssert.assertThat;
@@ -1392,10 +1393,10 @@ public abstract class TestOmSnapshot {
SnapshotDiffReportOzone
diff3 = getSnapDiffReport(volume, bucket, snap3, snap4);
assertEquals(1, diff3.getDiffList().size());
- assertTrue(diff3.getDiffList().contains(
+ assertThat(diff3.getDiffList()).contains(
SnapshotDiffReportOzone.getDiffReportEntry(
SnapshotDiffReportOzone.DiffType.RENAME, key2,
- key2Renamed)));
+ key2Renamed));
// Create a directory
@@ -1406,9 +1407,9 @@ public abstract class TestOmSnapshot {
SnapshotDiffReportOzone
diff4 = getSnapDiffReport(volume, bucket, snap4, snap5);
assertEquals(1, diff4.getDiffList().size());
- assertTrue(diff4.getDiffList().contains(
+ assertThat(diff4.getDiffList()).contains(
SnapshotDiffReportOzone.getDiffReportEntry(
- SnapshotDiffReportOzone.DiffType.CREATE, dir1)));
+ SnapshotDiffReportOzone.DiffType.CREATE, dir1));
String key3 = createFileKeyWithPrefix(bucket1, "key-3-");
String snap6 = "snap" + counter.incrementAndGet();
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]