This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new baae7507cb HDDS-10207. Simplify tests using assertThrows in
hadoop-ozone (#6130)
baae7507cb is described below
commit baae7507cb0c23f61a607c0f6d943019455f6bba
Author: Zhaohui Wang <[email protected]>
AuthorDate: Thu Feb 1 13:54:58 2024 +0800
HDDS-10207. Simplify tests using assertThrows in hadoop-ozone (#6130)
---
.../hadoop/ozone/client/TestOzoneClient.java | 9 +-
.../hadoop/ozone/client/TestOzoneECClient.java | 23 ++---
.../hadoop/ozone/client/rpc/TestOzoneKMSUtil.java | 12 +--
.../org/apache/hadoop/ozone/TestOzoneAcls.java | 10 +-
.../hadoop/ozone/om/lock/TestKeyPathLock.java | 54 +++++------
.../hadoop/ozone/om/lock/TestOzoneManagerLock.java | 30 +++---
.../ozone/om/protocolPB/TestS3GrpcOmTransport.java | 17 +---
.../ozone/security/TestGDPRSymmetricKey.java | 13 +--
.../ozone/TestDirectoryDeletingServiceWithFSO.java | 11 +--
.../fs/ozone/TestOzoneFSWithObjectStoreCreate.java | 45 +++------
.../hdds/scm/TestStorageContainerManager.java | 48 ++++------
.../hadoop/hdds/scm/storage/TestCommitWatcher.java | 39 ++++----
.../hadoop/ozone/TestContainerOperations.java | 12 +--
.../ozone/client/TestOzoneClientFactory.java | 15 ++-
.../rpc/TestContainerStateMachineFailures.java | 6 +-
.../client/rpc/TestDeleteWithInAdequateDN.java | 16 ++--
.../rpc/TestOzoneClientMultipartUploadWithFSO.java | 25 +----
.../rpc/TestOzoneClientRetriesOnExceptions.java | 31 +++----
.../client/rpc/TestOzoneRpcClientAbstract.java | 45 +++------
.../ozone/client/rpc/TestWatchForCommit.java | 53 +++++------
.../hadoop/ozone/om/TestAddRemoveOzoneManager.java | 76 +++++++---------
.../apache/hadoop/ozone/om/TestBucketOwner.java | 79 +++++++---------
.../apache/hadoop/ozone/om/TestKeyManagerImpl.java | 101 +++++++++------------
.../apache/hadoop/ozone/om/TestObjectStore.java | 16 ++--
.../hadoop/ozone/om/TestObjectStoreWithFSO.java | 92 +++++++------------
.../ozone/om/TestObjectStoreWithLegacyFS.java | 14 ++-
.../ozone/om/TestOzoneManagerConfiguration.java | 25 ++---
.../ozone/om/TestOzoneManagerListVolumes.java | 23 ++---
.../hadoop/ozone/om/TestRecursiveAclWithFSO.java | 64 +++++--------
.../ozone/om/service/TestRangerBGSyncService.java | 30 ++----
.../hadoop/ozone/shell/TestOzoneDatanodeShell.java | 18 ++--
.../hadoop/ozone/shell/TestOzoneShellHA.java | 18 ++--
.../hadoop/ozone/shell/TestOzoneTenantShell.java | 18 ++--
.../hadoop/ozone/om/TestOMMultiTenantManager.java | 21 ++---
.../TestMultiTenantAccessController.java | 38 ++------
.../om/ratis/TestOzoneManagerStateMachine.java | 24 ++---
.../request/TestBucketLayoutAwareOMKeyFactory.java | 27 +++---
.../ozone/om/request/TestNormalizePaths.java | 13 ++-
.../om/request/key/TestOMKeyCreateRequest.java | 19 ++--
.../request/validation/TestRequestValidations.java | 15 +--
.../ozone/om/upgrade/TestOMUpgradeFinalizer.java | 16 +---
.../ozone/om/upgrade/TestOMVersionManager.java | 11 +--
.../ozone/recon/api/TestContainerEndpoint.java | 11 +--
.../s3/endpoint/TestMultipartUploadComplete.java | 24 ++---
.../hadoop/ozone/s3/endpoint/TestPartUpload.java | 12 +--
.../s3/endpoint/TestPartUploadWithStream.java | 12 +--
.../ozone/s3/metrics/TestS3GatewayMetrics.java | 8 +-
.../hadoop/ozone/audit/parser/TestAuditParser.java | 13 +--
.../ozone/scm/TestDecommissionScmSubcommand.java | 11 +--
49 files changed, 498 insertions(+), 865 deletions(-)
diff --git
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
index eb88346414..09a6c0a5c0 100644
---
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
+++
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClient.java
@@ -48,7 +48,6 @@ import java.util.UUID;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.ozone.test.GenericTestUtils.getTestStartTime;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
@@ -68,12 +67,8 @@ public class TestOzoneClient {
OMException.ResultCodes code,
VoidCallable eval)
throws Exception {
- try {
- eval.call();
- fail("OMException is expected");
- } catch (OMException ex) {
- assertEquals(code, ex.getResult());
- }
+ OMException ex = assertThrows(OMException.class, () -> eval.call());
+ assertEquals(code, ex.getResult());
}
@BeforeEach
diff --git
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
index a2287ecc52..c3e44c7d6a 100644
---
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
+++
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/TestOzoneECClient.java
@@ -706,20 +706,15 @@ public class TestOzoneECClient {
nodesIndexesToMarkFailure[2] = 10;
//To mark node failed in fourth block group.
nodesIndexesToMarkFailure[3] = 15;
- try {
- // Mocked MultiNodePipelineBlockAllocator#allocateBlock implementation
can
- // pick good block group, but client retries should be limited
- // OZONE_CLIENT_MAX_EC_STRIPE_WRITE_RETRIES_ON_FAILURE(here it was
- // configured as 3). So, it should fail as we have marked 3 nodes as bad.
- testStripeWriteRetriesOnFailures(con, 20, nodesIndexesToMarkFailure);
- fail(
- "Expecting it to fail as retries should exceed the max allowed
times:"
- + " " + 3);
- } catch (IOException e) {
- assertEquals(
- "Completed max allowed retries 3 on stripe failures.",
- e.getMessage());
- }
+ // Mocked MultiNodePipelineBlockAllocator#allocateBlock implementation can
+ // pick good block group, but client retries should be limited
+ // OZONE_CLIENT_MAX_EC_STRIPE_WRITE_RETRIES_ON_FAILURE(here it was
+ // configured as 3). So, it should fail as we have marked 3 nodes as bad.
+ IOException e = assertThrows(IOException.class,
+ () -> testStripeWriteRetriesOnFailures(con, 20,
nodesIndexesToMarkFailure));
+ assertEquals(
+ "Completed max allowed retries 3 on stripe failures.",
+ e.getMessage());
}
public void testStripeWriteRetriesOnFailures(OzoneConfiguration con,
diff --git
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
index ea70f19fdf..8d9efd9632 100644
---
a/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
+++
b/hadoop-ozone/client/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneKMSUtil.java
@@ -25,7 +25,7 @@ import org.junit.jupiter.api.Test;
import java.io.IOException;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Test class for {@link OzoneKMSUtil}.
@@ -41,12 +41,8 @@ public class TestOzoneKMSUtil {
@Test
public void getKeyProvider() {
- try {
- OzoneKMSUtil.getKeyProvider(config, null);
- fail("Expected IOException.");
- } catch (IOException ioe) {
- assertEquals(ioe.getMessage(), "KMS serverProviderUri is " +
- "not configured.");
- }
+ IOException ioe =
+ assertThrows(IOException.class, () ->
OzoneKMSUtil.getKeyProvider(config, null));
+ assertEquals(ioe.getMessage(), "KMS serverProviderUri is " + "not
configured.");
}
}
diff --git
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
index a815b72dec..08ae1fbc65 100644
---
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
+++
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/TestOzoneAcls.java
@@ -36,7 +36,6 @@ import static
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.REA
import static
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE;
import static
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType.WRITE_ACL;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
@@ -122,14 +121,7 @@ class TestOzoneAcls {
if (entry.getValue()) {
OzoneAcl.parseAcl(entry.getKey());
} else {
- try {
- OzoneAcl.parseAcl(entry.getKey());
- // should never get here since parseAcl will throw
- fail("An exception was expected but did not happen. Key: " +
- entry.getKey());
- } catch (IllegalArgumentException e) {
- // nothing to do
- }
+ assertThrows(IllegalArgumentException.class, () ->
OzoneAcl.parseAcl(entry.getKey()));
}
}
}
diff --git
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java
index 75adb7e6a1..4a81485206 100644
---
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java
+++
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestKeyPathLock.java
@@ -30,7 +30,7 @@ import java.util.UUID;
import java.util.concurrent.CountDownLatch;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertEquals;
/**
@@ -239,14 +239,11 @@ class TestKeyPathLock extends TestOzoneManagerLock {
higherResourceName = new String[]{volumeName, bucketName};
lock.acquireWriteLock(resource, resourceName);
- try {
- lock.acquireWriteLock(higherResource, higherResourceName);
- fail("testAcquireWriteBucketLockWhileAcquiredWriteKeyPathLock() failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + higherResource.getName() + " lock "
+
- "while holding [" + resource.getName() + "] lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () ->
lock.acquireWriteLock(higherResource, higherResourceName));
+ String message = "cannot acquire " + higherResource.getName() + " lock " +
+ "while holding [" + resource.getName() + "] lock(s).";
+ assertThat(ex).hasMessageContaining(message);
}
@Test
@@ -264,14 +261,11 @@ class TestKeyPathLock extends TestOzoneManagerLock {
higherResourceName = new String[]{volumeName, bucketName};
lock.acquireReadLock(resource, resourceName);
- try {
- lock.acquireWriteLock(higherResource, higherResourceName);
- fail("testAcquireWriteBucketLockWhileAcquiredReadKeyPathLock() failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + higherResource.getName() + " lock "
+
- "while holding [" + resource.getName() + "] lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () ->
lock.acquireWriteLock(higherResource, higherResourceName));
+ String message = "cannot acquire " + higherResource.getName() + " lock " +
+ "while holding [" + resource.getName() + "] lock(s).";
+ assertThat(ex).hasMessageContaining(message);
}
@Test
@@ -289,14 +283,11 @@ class TestKeyPathLock extends TestOzoneManagerLock {
higherResourceName = new String[]{volumeName, bucketName};
lock.acquireReadLock(resource, resourceName);
- try {
- lock.acquireReadLock(higherResource, higherResourceName);
- fail("testAcquireReadBucketLockWhileAcquiredReadKeyPathLock() failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + higherResource.getName() + " lock "
+
- "while holding [" + resource.getName() + "] lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () ->
lock.acquireReadLock(higherResource, higherResourceName));
+ String message = "cannot acquire " + higherResource.getName() + " lock " +
+ "while holding [" + resource.getName() + "] lock(s).";
+ assertThat(ex).hasMessageContaining(message);
}
@Test
@@ -314,13 +305,10 @@ class TestKeyPathLock extends TestOzoneManagerLock {
higherResourceName = new String[]{volumeName, bucketName};
lock.acquireWriteLock(resource, resourceName);
- try {
- lock.acquireReadLock(higherResource, higherResourceName);
- fail("testAcquireReadBucketLockWhileAcquiredWriteKeyPathLock() failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + higherResource.getName() + " lock "
+
- "while holding [" + resource.getName() + "] lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () ->
lock.acquireReadLock(higherResource, higherResourceName));
+ String message = "cannot acquire " + higherResource.getName() + " lock " +
+ "while holding [" + resource.getName() + "] lock(s).";
+ assertThat(ex).hasMessageContaining(message);
}
}
diff --git
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
index 856f2b238c..54ab718ccf 100644
---
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
+++
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/lock/TestOzoneManagerLock.java
@@ -40,7 +40,6 @@ import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Class tests OzoneManagerLock.
@@ -77,14 +76,11 @@ class TestOzoneManagerLock {
resource == Resource.S3_SECRET_LOCK ||
resource == Resource.PREFIX_LOCK) {
lock.acquireWriteLock(resource, resourceName);
- try {
- lock.acquireWriteLock(resource, resourceName);
- fail("reacquireResourceLock failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + resource.getName() + " lock " +
- "while holding [" + resource.getName() + "] lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () ->
lock.acquireWriteLock(resource, resourceName));
+ String message = "cannot acquire " + resource.getName() + " lock " +
+ "while holding [" + resource.getName() + "] lock(s).";
+ assertThat(ex).hasMessageContaining(message);
assertDoesNotThrow(() -> lock.releaseWriteLock(resource, resourceName));
} else {
lock.acquireWriteLock(resource, resourceName);
@@ -162,15 +158,13 @@ class TestOzoneManagerLock {
stack.push(new ResourceInfo(resourceName, higherResource));
currentLocks.add(higherResource.getName());
// try to acquire lower level lock
- try {
- resourceName = generateResourceName(resource);
- lock.acquireWriteLock(resource, resourceName);
- fail("testLockViolations failed");
- } catch (RuntimeException ex) {
- String message = "cannot acquire " + resource.getName() + " lock "
+
- "while holding " + currentLocks + " lock(s).";
- assertThat(ex).hasMessageContaining(message);
- }
+ RuntimeException ex = assertThrows(RuntimeException.class, () -> {
+ String[] resourceName1 = generateResourceName(resource);
+ lock.acquireWriteLock(resource, resourceName1);
+ });
+ String message = "cannot acquire " + resource.getName() + " lock " +
+ "while holding " + currentLocks + " lock(s).";
+ assertThat(ex).hasMessageContaining(message);
}
}
diff --git
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
index 04bb4b240d..10bb155d70 100644
---
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
+++
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.protocolPB;
import static org.apache.hadoop.ozone.ClientVersion.CURRENT_VERSION;
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH_DEFAULT;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.AdditionalAnswers.delegatesTo;
import static org.mockito.Mockito.mock;
@@ -46,8 +47,6 @@ import java.io.IOException;
import com.google.protobuf.ServiceException;
import org.apache.ratis.protocol.RaftPeerId;
-import static org.junit.jupiter.api.Assertions.fail;
-import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.apache.hadoop.ozone.om.OMConfigKeys
.OZONE_OM_GRPC_MAXIMUM_RESPONSE_LENGTH;
@@ -216,12 +215,7 @@ public class TestS3GrpcOmTransport {
// OMFailoverProvider returns Fail retry due to #attempts >
// max failovers
- try {
- final OMResponse resp = client.submitRequest(omRequest);
- fail();
- } catch (Exception e) {
- assertTrue(true);
- }
+ assertThrows(Exception.class, () -> client.submitRequest(omRequest));
}
@Test
@@ -251,11 +245,6 @@ public class TestS3GrpcOmTransport {
// len > 0, causing RESOURCE_EXHAUSTED exception.
// This exception should cause failover to NOT retry,
// rather to fail.
- try {
- final OMResponse resp = client.submitRequest(omRequest);
- fail();
- } catch (Exception e) {
- assertTrue(true);
- }
+ assertThrows(Exception.class, () -> client.submitRequest(omRequest));
}
}
diff --git
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
index 0b69d0dd9b..1ab01ee3e0 100644
---
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
+++
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/security/TestGDPRSymmetricKey.java
@@ -22,8 +22,8 @@ import org.junit.jupiter.api.Test;
import java.security.SecureRandom;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Tests GDPRSymmetricKey structure.
@@ -56,13 +56,8 @@ public class TestGDPRSymmetricKey {
@Test
public void testKeyGenerationWithInvalidInput() throws Exception {
- try {
- new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5),
- OzoneConsts.GDPR_ALGORITHM_NAME);
- fail("Expect length mismatched");
- } catch (IllegalArgumentException ex) {
- assertTrue(ex.getMessage()
- .equalsIgnoreCase("Secret must be exactly 16 characters"));
- }
+ IllegalArgumentException e = assertThrows(IllegalArgumentException.class,
+ () -> new GDPRSymmetricKey(RandomStringUtils.randomAlphabetic(5),
OzoneConsts.GDPR_ALGORITHM_NAME));
+ assertTrue(e.getMessage().equalsIgnoreCase("Secret must be exactly 16
characters"));
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
index c41d4fa9aa..87f114bd71 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestDirectoryDeletingServiceWithFSO.java
@@ -59,7 +59,7 @@ import java.util.function.LongSupplier;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
@@ -547,13 +547,8 @@ public class TestDirectoryDeletingServiceWithFSO {
}
private void checkPath(Path path) {
- try {
- fs.getFileStatus(path);
- fail("testRecursiveDelete failed");
- } catch (IOException ex) {
- assertInstanceOf(FileNotFoundException.class, ex);
- assertThat(ex.getMessage()).contains("No such file or directory");
- }
+ FileNotFoundException ex = assertThrows(FileNotFoundException.class, () ->
fs.getFileStatus(path));
+ assertThat(ex.getMessage()).contains("No such file or directory");
}
private static BucketLayout getFSOBucketLayout() {
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
index cf84a8ad30..6dccd60420 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -47,7 +47,6 @@ import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.Timeout;
import java.io.FileNotFoundException;
-import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
@@ -62,11 +61,9 @@ import static
org.apache.hadoop.ozone.OzoneConsts.OZONE_URI_SCHEME;
import static
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Class tests create with object store and getFileStatus.
@@ -266,15 +263,8 @@ public class TestOzoneFSWithObjectStoreCreate {
// Before close create directory with same name.
o3fs.mkdirs(new Path("/a/b/c"));
-
- try {
- ozoneOutputStream.close();
- fail("testKeyCreationFailDuetoDirectoryCreationBeforeCommit");
- } catch (IOException ex) {
- OMException e = assertInstanceOf(OMException.class, ex);
- assertEquals(NOT_A_FILE, e.getResult());
- }
-
+ OMException ex = assertThrows(OMException.class, () ->
ozoneOutputStream.close());
+ assertEquals(NOT_A_FILE, ex.getResult());
}
@@ -309,14 +299,10 @@ public class TestOzoneFSWithObjectStoreCreate {
partsMap.put(1, ozoneOutputStream.getCommitUploadPartInfo().getPartName());
// Should fail, as we have directory with same name.
- try {
- ozoneBucket.completeMultipartUpload(keyName,
- omMultipartInfo.getUploadID(), partsMap);
- fail("testMPUFailDuetoDirectoryCreationBeforeComplete failed");
- } catch (OMException ex) {
- assertInstanceOf(OMException.class, ex);
- assertEquals(NOT_A_FILE, ex.getResult());
- }
+ OMException ex = assertThrows(OMException.class, () ->
ozoneBucket.completeMultipartUpload(keyName,
+ omMultipartInfo.getUploadID(), partsMap));
+ assertEquals(NOT_A_FILE, ex.getResult());
+
// Delete directory
o3fs.delete(new Path(keyName), true);
@@ -339,25 +325,16 @@ public class TestOzoneFSWithObjectStoreCreate {
public void testCreateDirectoryFirstThenKeyAndFileWithSameName()
throws Exception {
o3fs.mkdirs(new Path("/t1/t2"));
-
- try {
- o3fs.create(new Path("/t1/t2"));
- fail("testCreateDirectoryFirstThenFileWithSameName failed");
- } catch (FileAlreadyExistsException ex) {
- assertThat(ex.getMessage()).contains(NOT_A_FILE.name());
- }
+ FileAlreadyExistsException e =
+ assertThrows(FileAlreadyExistsException.class, () -> o3fs.create(new
Path("/t1/t2")));
+ assertThat(e.getMessage()).contains(NOT_A_FILE.name());
OzoneVolume ozoneVolume =
client.getObjectStore().getVolume(volumeName);
OzoneBucket ozoneBucket = ozoneVolume.getBucket(bucketName);
ozoneBucket.createDirectory("t1/t2");
- try {
- ozoneBucket.createKey("t1/t2", 0);
- fail("testCreateDirectoryFirstThenFileWithSameName failed");
- } catch (OMException ex) {
- assertInstanceOf(OMException.class, ex);
- assertEquals(NOT_A_FILE, ex.getResult());
- }
+ OMException ex = assertThrows(OMException.class, () ->
ozoneBucket.createKey("t1/t2", 0));
+ assertEquals(NOT_A_FILE, ex.getResult());
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
index 07787b87f3..cf43ffcc22 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManager.java
@@ -217,19 +217,14 @@ public class TestStorageContainerManager {
cluster.getStorageContainerManager().getClientProtocolServer());
mockRemoteUser(UserGroupInformation.createRemoteUser(fakeRemoteUsername));
-
- try {
- mockClientServer.deleteContainer(
- ContainerTestHelper.getTestContainerID());
- fail("Operation should fail, expecting an IOException here.");
- } catch (Exception e) {
- if (expectPermissionDenied) {
- verifyPermissionDeniedException(e, fakeRemoteUsername);
- } else {
- // If passes permission check, it should fail with
- // container not exist exception.
- assertInstanceOf(ContainerNotFoundException.class, e);
- }
+ Exception ex = assertThrows(Exception.class, () ->
mockClientServer.deleteContainer(
+ ContainerTestHelper.getTestContainerID()));
+ if (expectPermissionDenied) {
+ verifyPermissionDeniedException(ex, fakeRemoteUsername);
+ } else {
+ // If passes permission check, it should fail with
+ // container not exist exception.
+ assertInstanceOf(ContainerNotFoundException.class, ex);
}
try {
@@ -245,18 +240,14 @@ public class TestStorageContainerManager {
verifyPermissionDeniedException(e, fakeRemoteUsername);
}
- try {
- mockClientServer.getContainer(
- ContainerTestHelper.getTestContainerID());
- fail("Operation should fail, expecting an IOException here.");
- } catch (Exception e) {
- if (expectPermissionDenied) {
- verifyPermissionDeniedException(e, fakeRemoteUsername);
- } else {
- // If passes permission check, it should fail with
- // key not exist exception.
- assertInstanceOf(ContainerNotFoundException.class, e);
- }
+ Exception e = assertThrows(Exception.class, () ->
mockClientServer.getContainer(
+ ContainerTestHelper.getTestContainerID()));
+ if (expectPermissionDenied) {
+ verifyPermissionDeniedException(e, fakeRemoteUsername);
+ } else {
+ // If passes permission check, it should fail with
+ // key not exist exception.
+ assertInstanceOf(ContainerNotFoundException.class, e);
}
}
@@ -723,12 +714,7 @@ public class TestStorageContainerManager {
final String clusterId =
cluster.getStorageContainerManager().getClusterId();
// validate there is no ratis group pre existing
- try {
- validateRatisGroupExists(conf, clusterId);
- fail();
- } catch (IOException ioe) {
- // Exception is expected here
- }
+ assertThrows(IOException.class, () -> validateRatisGroupExists(conf,
clusterId));
conf.setBoolean(ScmConfigKeys.OZONE_SCM_HA_ENABLE_KEY, true);
// This will re-initialize SCM
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
index f971dc6adb..d4581d1ad4 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/storage/TestCommitWatcher.java
@@ -59,8 +59,8 @@ import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTER
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import org.apache.ratis.protocol.exceptions.AlreadyClosedException;
import org.apache.ratis.protocol.exceptions.NotReplicatedException;
@@ -300,26 +300,23 @@ public class TestCommitWatcher {
assertThat(watcher.getTotalAckDataLength()).isGreaterThanOrEqualTo(chunkSize);
cluster.shutdownHddsDatanode(pipeline.getNodes().get(0));
cluster.shutdownHddsDatanode(pipeline.getNodes().get(1));
- try {
- // just watch for a higher index so as to ensure, it does an actual
- // call to Ratis. Otherwise, it may just return in case the
- // commitInfoMap is updated to the latest index in putBlock response.
- watcher.watchForCommit(replies.get(1).getLogIndex() + 100);
- fail("Expected exception not thrown");
- } catch (IOException ioe) {
- // with retry count set to noRetry and a lower watch request
- // timeout, watch request will eventually
- // fail with TimeoutIOException from ratis client or the client
- // can itself get AlreadyClosedException from the Ratis Server
- // and the write may fail with RaftRetryFailureException
- Throwable t = HddsClientUtils.checkForException(ioe);
- assertTrue(
- t instanceof RaftRetryFailureException ||
- t instanceof TimeoutIOException ||
- t instanceof AlreadyClosedException ||
- t instanceof NotReplicatedException,
- "Unexpected exception: " + t.getClass());
- }
+ // just watch for a higher index so as to ensure, it does an actual
+ // call to Ratis. Otherwise, it may just return in case the
+ // commitInfoMap is updated to the latest index in putBlock response.
+ IOException ioe =
+ assertThrows(IOException.class, () ->
watcher.watchForCommit(replies.get(1).getLogIndex() + 100));
+ Throwable t = HddsClientUtils.checkForException(ioe);
+ // with retry count set to noRetry and a lower watch request
+ // timeout, watch request will eventually
+ // fail with TimeoutIOException from ratis client or the client
+ // can itself get AlreadyClosedException from the Ratis Server
+ // and the write may fail with RaftRetryFailureException
+ assertTrue(
+ t instanceof RaftRetryFailureException ||
+ t instanceof TimeoutIOException ||
+ t instanceof AlreadyClosedException ||
+ t instanceof NotReplicatedException,
+ "Unexpected exception: " + t.getClass());
if (ratisClient.getReplicatedMinCommitIndex() < replies.get(1)
.getLogIndex()) {
assertEquals(chunkSize, watcher.getTotalAckDataLength());
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
index de57152a21..5f8f34a2e3 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestContainerOperations.java
@@ -44,8 +44,8 @@ import org.junit.jupiter.api.Timeout;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* This class tests container operations (TODO currently only supports create)
@@ -94,13 +94,9 @@ public class TestContainerOperations {
*/
@Test
public void testGetPipeline() throws Exception {
- try {
- storageClient.getPipeline(PipelineID.randomId().getProtobuf());
- fail("Get Pipeline should fail");
- } catch (Exception e) {
- assertInstanceOf(PipelineNotFoundException.class,
SCMHAUtils.unwrapException(e));
- }
-
+ Exception e =
+ assertThrows(Exception.class, () ->
storageClient.getPipeline(PipelineID.randomId().getProtobuf()));
+ assertInstanceOf(PipelineNotFoundException.class,
SCMHAUtils.unwrapException(e));
assertThat(storageClient.listPipelines()).isNotEmpty();
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
index 5e3c3ab5a7..666161bcf1 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/TestOzoneClientFactory.java
@@ -16,7 +16,8 @@
*/
package org.apache.hadoop.ozone.client;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.security.AccessControlException;
@@ -39,8 +40,7 @@ public class TestOzoneClientFactory {
public void testRemoteException() {
OzoneConfiguration conf = new OzoneConfiguration();
-
- try {
+ Exception e = assertThrows(Exception.class, () -> {
MiniOzoneCluster cluster = MiniOzoneCluster.newBuilder(conf)
.setNumDatanodes(3)
.setTotalPipelineNumLimit(10)
@@ -59,17 +59,14 @@ public class TestOzoneClientFactory {
public Void run() throws IOException {
conf.set("ozone.security.enabled", "true");
try (OzoneClient ozoneClient =
- OzoneClientFactory.getRpcClient("localhost",
- Integer.parseInt(omPort), conf)) {
+ OzoneClientFactory.getRpcClient("localhost",
Integer.parseInt(omPort), conf)) {
ozoneClient.getObjectStore().listVolumes("/");
}
return null;
}
});
- fail("Should throw exception here");
- } catch (IOException | InterruptedException e) {
- assert e instanceof AccessControlException;
- }
+ });
+ assertInstanceOf(AccessControlException.class, e);
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
index 2a316cdedd..01ef2cf1be 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestContainerStateMachineFailures.java
@@ -95,6 +95,7 @@ import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
@@ -459,10 +460,7 @@ public class TestContainerStateMachineFailures {
// a pipeline close action
try {
- xceiverClient.sendCommand(request.build());
- fail("Expected exception not thrown");
- } catch (IOException e) {
- // Exception should be thrown
+ assertThrows(IOException.class, () ->
xceiverClient.sendCommand(request.build()));
} finally {
xceiverClientManager.releaseClient(xceiverClient, false);
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
index 0363d7aef1..ba975e6b21 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestDeleteWithInAdequateDN.java
@@ -71,9 +71,9 @@ import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_PIPELINE_DESTRO
import static
org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_STALENODE_INTERVAL;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertSame;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assumptions;
@@ -319,16 +319,14 @@ public class TestDeleteWithInAdequateDN {
.getDispatcher()
.getHandler(ContainerProtos.ContainerType.KeyValueContainer);
// make sure the chunk is now deleted on the all dns
- try {
+ KeyValueHandler finalKeyValueHandler = keyValueHandler;
+ StorageContainerException e =
assertThrows(StorageContainerException.class, () -> {
for (ContainerProtos.ChunkInfo chunkInfo : blockData.getChunks()) {
- keyValueHandler.getChunkManager().readChunk(container, blockID,
- ChunkInfo.getFromProtoBuf(chunkInfo), null);
+ finalKeyValueHandler.getChunkManager().readChunk(container, blockID,
+ ChunkInfo.getFromProtoBuf(chunkInfo), null);
}
- fail("Expected exception is not thrown");
- } catch (IOException ioe) {
- StorageContainerException e =
assertInstanceOf(StorageContainerException.class, ioe);
- assertSame(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, e.getResult());
- }
+ });
+ assertSame(ContainerProtos.Result.UNABLE_TO_FIND_CHUNK, e.getResult());
}
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 2de5e83a4e..027af41319 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -82,13 +82,11 @@ import static
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NO_S
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* This test verifies all the S3 multipart client apis - prefix layout.
@@ -489,15 +487,9 @@ public class TestOzoneClientMultipartUploadWithFSO {
String part1 = new String(data, UTF_8);
sb.append(part1);
assertEquals(sb.toString(), new String(fileContent, UTF_8));
-
- try {
- ozoneOutputStream.close();
- fail("testCommitPartAfterCompleteUpload failed");
- } catch (IOException ex) {
- assertInstanceOf(OMException.class, ex);
- assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
- ((OMException) ex).getResult());
- }
+ OzoneOutputStream finalOzoneOutputStream = ozoneOutputStream;
+ OMException ex = assertThrows(OMException.class, () ->
finalOzoneOutputStream.close());
+ assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ex.getResult());
}
@Test
@@ -522,15 +514,8 @@ public class TestOzoneClientMultipartUploadWithFSO {
// Abort before completing part upload.
bucket.abortMultipartUpload(keyName, uploadID);
-
- try {
- ozoneOutputStream.close();
- fail("testAbortUploadFailWithInProgressPartUpload failed");
- } catch (IOException ex) {
- assertInstanceOf(OMException.class, ex);
- assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR,
- ((OMException) ex).getResult());
- }
+ OMException ome = assertThrows(OMException.class, () ->
ozoneOutputStream.close());
+ assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult());
}
@Test
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
index 5d6b601ad9..2c83cf7854 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientRetriesOnExceptions.java
@@ -53,6 +53,7 @@ import static java.nio.charset.StandardCharsets.UTF_8;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import org.apache.ratis.protocol.exceptions.GroupMismatchException;
import org.junit.jupiter.api.AfterEach;
@@ -230,28 +231,24 @@ public class TestOzoneClientRetriesOnExceptions {
// the max retry count of N.
Assumptions.assumeTrue(containerList.size() > MAX_RETRIES,
containerList.size() + " <= " + MAX_RETRIES);
- try {
+ IOException ioe = assertThrows(IOException.class, () -> {
key.write(data1);
// ensure that write is flushed to dn
key.flush();
- fail("Expected exception not thrown");
- } catch (IOException ioe) {
- assertInstanceOf(ContainerNotOpenException.class,
-
HddsClientUtils.checkForException(blockOutputStream.getIoException()));
- assertThat(ioe.getMessage()).contains(
- "Retry request failed. " +
- "retries get failed due to exceeded maximum " +
- "allowed retries number: " + MAX_RETRIES);
- }
- try {
- key.flush();
- fail("Expected exception not thrown");
- } catch (IOException ioe) {
- assertThat(ioe.getMessage()).contains("Stream is closed");
- }
+ });
+ assertInstanceOf(ContainerNotOpenException.class,
+ HddsClientUtils.checkForException(blockOutputStream.getIoException()));
+ assertThat(ioe.getMessage()).contains(
+ "Retry request failed. " +
+ "retries get failed due to exceeded maximum " +
+ "allowed retries number: " + MAX_RETRIES);
+
+ ioe = assertThrows(IOException.class, () -> key.flush());
+ assertThat(ioe.getMessage()).contains("Stream is closed");
+
try {
key.close();
- } catch (IOException ioe) {
+ } catch (IOException e) {
fail("Expected should not be thrown");
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
index 05c957d782..c2f1eb069c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneRpcClientAbstract.java
@@ -129,6 +129,7 @@ import static
org.apache.hadoop.hdds.StringUtils.string2Bytes;
import static org.apache.hadoop.hdds.client.ReplicationFactor.ONE;
import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
+import static org.apache.hadoop.ozone.OmUtils.LOG;
import static org.apache.hadoop.ozone.OmUtils.MAX_TRXN_ID;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.ACCESS;
import static org.apache.hadoop.ozone.OzoneAcl.AclScope.DEFAULT;
@@ -149,7 +150,6 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
@@ -2809,13 +2809,10 @@ public abstract class TestOzoneRpcClientAbstract {
String keyName2 = UUID.randomUUID().toString();
OzoneBucket bucket2 = client.getObjectStore().getVolume(volumeName)
.getBucket(bucketName);
- try {
- initiateMultipartUpload(bucket2, keyName2, anyReplication());
- fail("User without permission should fail");
- } catch (Exception e) {
- OMException ome = assertInstanceOf(OMException.class, e);
- assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult());
- }
+ OMException ome =
+ assertThrows(OMException.class, () ->
initiateMultipartUpload(bucket2, keyName2, anyReplication()),
+ "User without permission should fail");
+ assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult());
// Add create permission for user, and try multi-upload init again
OzoneAcl acl7 = new OzoneAcl(USER, userName, ACLType.CREATE, DEFAULT);
@@ -2844,12 +2841,12 @@ public abstract class TestOzoneRpcClientAbstract {
completeMultipartUpload(bucket2, keyName2, uploadId, partsMap);
// User without permission cannot read multi-uploaded object
- try (OzoneInputStream ignored = bucket2.readKey(keyName)) {
- fail("User without permission should fail");
- } catch (Exception e) {
- OMException ome = assertInstanceOf(OMException.class, e);
- assertEquals(ResultCodes.PERMISSION_DENIED, ome.getResult());
- }
+ OMException ex = assertThrows(OMException.class, () -> {
+ try (OzoneInputStream ignored = bucket2.readKey(keyName)) {
+ LOG.error("User without permission should fail");
+ }
+ }, "User without permission should fail");
+ assertEquals(ResultCodes.PERMISSION_DENIED, ex.getResult());
}
}
@@ -3053,14 +3050,8 @@ public abstract class TestOzoneRpcClientAbstract {
// Abort before completing part upload.
bucket.abortMultipartUpload(keyName, omMultipartInfo.getUploadID());
-
- try {
- ozoneOutputStream.close();
- fail("testAbortUploadFailWithInProgressPartUpload failed");
- } catch (IOException ex) {
- OMException ome = assertInstanceOf(OMException.class, ex);
- assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult());
- }
+ OMException ome = assertThrows(OMException.class, () ->
ozoneOutputStream.close());
+ assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult());
}
@Test
@@ -3115,14 +3106,8 @@ public abstract class TestOzoneRpcClientAbstract {
String part1 = new String(data, UTF_8);
sb.append(part1);
assertEquals(sb.toString(), new String(fileContent, UTF_8));
-
- try {
- ozoneOutputStream.close();
- fail("testCommitPartAfterCompleteUpload failed");
- } catch (IOException ex) {
- OMException ome = assertInstanceOf(OMException.class, ex);
- assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ome.getResult());
- }
+ OMException ex = assertThrows(OMException.class, ozoneOutputStream::close);
+ assertEquals(NO_SUCH_MULTIPART_UPLOAD_ERROR, ex.getResult());
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
index b053a4394b..69a1032015 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestWatchForCommit.java
@@ -69,7 +69,7 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import org.apache.ratis.protocol.exceptions.GroupMismatchException;
import org.junit.jupiter.api.AfterEach;
@@ -275,25 +275,20 @@ public class TestWatchForCommit {
cluster.getStorageContainerManager()
.getPipelineManager().closePipeline(pipeline, false);
// again write data with more than max buffer limit. This wi
- try {
- // just watch for a log index which in not updated in the commitInfo
Map
- // as well as there is no logIndex generate in Ratis.
- // The basic idea here is just to test if its throws an exception.
- xceiverClient
- .watchForCommit(index + RandomUtils.nextInt(0, 100) + 10);
- fail("expected exception not thrown");
- } catch (Exception e) {
- assertInstanceOf(ExecutionException.class, e);
- // since the timeout value is quite long, the watch request will either
- // fail with NotReplicated exceptio, RetryFailureException or
- // RuntimeException
- assertFalse(HddsClientUtils
- .checkForException(e) instanceof TimeoutException);
- // client should not attempt to watch with
- // MAJORITY_COMMITTED replication level, except the grpc IO issue
- if (!logCapturer.getOutput().contains("Connection refused")) {
-
assertThat(e.getMessage()).doesNotContain("Watch-MAJORITY_COMMITTED");
- }
+ // just watch for a log index which is not updated in the commitInfo Map
+ // as well as there is no logIndex generated in Ratis.
+ // The basic idea here is just to test if it throws an exception.
+ ExecutionException e = assertThrows(ExecutionException.class,
+ () -> xceiverClient.watchForCommit(index + RandomUtils.nextInt(0,
100) + 10));
+ // since the timeout value is quite long, the watch request will either
+ // fail with NotReplicatedException, RetryFailureException or
+ // RuntimeException
+ assertFalse(HddsClientUtils
+ .checkForException(e) instanceof TimeoutException);
+ // client should not attempt to watch with
+ // MAJORITY_COMMITTED replication level, except the grpc IO issue
+ if (!logCapturer.getOutput().contains("Connection refused")) {
+ assertThat(e.getMessage()).doesNotContain("Watch-MAJORITY_COMMITTED");
}
clientManager.releaseClient(xceiverClient, false);
}
@@ -368,17 +363,13 @@ public class TestWatchForCommit {
List<Pipeline> pipelineList = new ArrayList<>();
pipelineList.add(pipeline);
TestHelper.waitForPipelineClose(pipelineList, cluster);
- try {
- // just watch for a log index which in not updated in the commitInfo
Map
- // as well as there is no logIndex generate in Ratis.
- // The basic idea here is just to test if its throws an exception.
- xceiverClient
- .watchForCommit(reply.getLogIndex() +
- RandomUtils.nextInt(0, 100) + 10);
- fail("Expected exception not thrown");
- } catch (Exception e) {
- assertInstanceOf(GroupMismatchException.class,
HddsClientUtils.checkForException(e));
- }
+ // just watch for a log index which is not updated in the commitInfo Map
+ // as well as there is no logIndex generated in Ratis.
+ // The basic idea here is just to test if it throws an exception.
+ Exception e =
+ assertThrows(Exception.class,
+ () -> xceiverClient.watchForCommit(reply.getLogIndex() +
RandomUtils.nextInt(0, 100) + 10));
+ assertInstanceOf(GroupMismatchException.class,
HddsClientUtils.checkForException(e));
clientManager.releaseClient(xceiverClient, false);
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java
index 54150ce90e..ec22bbd39f 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestAddRemoveOzoneManager.java
@@ -65,8 +65,8 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test for OM bootstrap process.
@@ -249,21 +249,18 @@ public class TestAddRemoveOzoneManager {
// Bootstrap a new node without updating the configs on existing OMs.
// This should result in the bootstrap failing.
- String newNodeId = "omNode-bootstrap-1";
- try {
- cluster.bootstrapOzoneManager(newNodeId, false, false);
- fail("Bootstrap should have failed as configs are not updated on" +
- " all OMs.");
- } catch (Exception e) {
- assertEquals(OmUtils.getOMAddressListPrintString(
- Lists.newArrayList(existingOM.getNodeDetails())) + " do not have or"
+
- " have incorrect information of the bootstrapping OM. Update their "
+
- "ozone-site.xml before proceeding.", e.getMessage());
- assertThat(omLog.getOutput()).contains("Remote OM config check " +
- "failed on OM " + existingOMNodeId);
- assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId +
- " - System Exit");
- }
+ final String newNodeId = "omNode-bootstrap-1";
+ Exception e =
+ assertThrows(Exception.class, () ->
cluster.bootstrapOzoneManager(newNodeId, false, false),
+ "Bootstrap should have failed as configs are not updated on all
OMs.");
+ assertEquals(OmUtils.getOMAddressListPrintString(
+ Lists.newArrayList(existingOM.getNodeDetails())) + " do not have or" +
+ " have incorrect information of the bootstrapping OM. Update their " +
+ "ozone-site.xml before proceeding.", e.getMessage());
+ assertThat(omLog.getOutput()).contains("Remote OM config check " +
+ "failed on OM " + existingOMNodeId);
+ assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId +
+ " - System Exit");
/***************************************************************************
* 2. Force bootstrap without updating config on any OM -> fail
@@ -276,15 +273,15 @@ public class TestAddRemoveOzoneManager {
miniOzoneClusterLog.clearOutput();
omLog.clearOutput();
- newNodeId = "omNode-bootstrap-2";
+ String newNodeId1 = "omNode-bootstrap-2";
try {
- cluster.bootstrapOzoneManager(newNodeId, false, true);
- } catch (IOException e) {
+ cluster.bootstrapOzoneManager(newNodeId1, false, true);
+ } catch (IOException ex) {
assertThat(omLog.getOutput()).contains("Couldn't add OM " +
- newNodeId + " to peer list.");
+ newNodeId1 + " to peer list.");
assertThat(miniOzoneClusterLog.getOutput()).contains(
existingOMNodeId + " - System Exit: There is no OM configuration " +
- "for node ID " + newNodeId + " in ozone-site.xml.");
+ "for node ID " + newNodeId1 + " in ozone-site.xml.");
// Verify that the existing OM has stopped.
assertFalse(cluster.getOzoneManager(existingOMNodeId).isRunning());
@@ -323,23 +320,20 @@ public class TestAddRemoveOzoneManager {
**************************************************************************/
// Update configs on all active OMs and Bootstrap a new node
- String newNodeId = "omNode-bootstrap-1";
- try {
- cluster.bootstrapOzoneManager(newNodeId, true, false);
- fail("Bootstrap should have failed as configs are not updated on" +
- " all OMs.");
- } catch (IOException e) {
- assertEquals(OmUtils.getOMAddressListPrintString(
- Lists.newArrayList(downOM.getNodeDetails())) + " do not have or " +
- "have incorrect information of the bootstrapping OM. Update their " +
- "ozone-site.xml before proceeding.", e.getMessage());
- assertThat(omLog.getOutput()).contains("Remote OM " + downOMNodeId +
- " configuration returned null");
- assertThat(omLog.getOutput()).contains("Remote OM config check " +
- "failed on OM " + downOMNodeId);
- assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId +
- " - System Exit");
- }
+ final String newNodeId = "omNode-bootstrap-1";
+ IOException e =
+ assertThrows(IOException.class, () ->
cluster.bootstrapOzoneManager(newNodeId, true, false),
+ "Bootstrap should have failed as configs are not updated on all
OMs.");
+ assertEquals(OmUtils.getOMAddressListPrintString(
+ Lists.newArrayList(downOM.getNodeDetails())) + " do not have or " +
+ "have incorrect information of the bootstrapping OM. Update their " +
+ "ozone-site.xml before proceeding.", e.getMessage());
+ assertThat(omLog.getOutput()).contains("Remote OM " + downOMNodeId +
+ " configuration returned null");
+ assertThat(omLog.getOutput()).contains("Remote OM config check " +
+ "failed on OM " + downOMNodeId);
+ assertThat(miniOzoneClusterLog.getOutput()).contains(newNodeId +
+ " - System Exit");
/***************************************************************************
* 2. Force bootstrap (with 1 node down and updated configs on rest) ->
pass
@@ -349,9 +343,9 @@ public class TestAddRemoveOzoneManager {
omLog.clearOutput();
// Update configs on all active OMs and Force Bootstrap a new node
- newNodeId = "omNode-bootstrap-2";
- cluster.bootstrapOzoneManager(newNodeId, true, true);
- OzoneManager newOM = cluster.getOzoneManager(newNodeId);
+ String newNodeId1 = "omNode-bootstrap-2";
+ cluster.bootstrapOzoneManager(newNodeId1, true, true);
+ OzoneManager newOM = cluster.getOzoneManager(newNodeId1);
// Verify that the newly bootstrapped OM is running
assertTrue(newOM.isRunning());
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java
index 0af4925dbc..b26c3cbc58 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketOwner.java
@@ -47,8 +47,8 @@ import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static
org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLIdentityType.USER;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test for Ozone Bucket Owner.
@@ -139,65 +139,54 @@ public class TestBucketOwner {
//Key Create
UserGroupInformation.setLoginUser(user3);
try (OzoneClient client = cluster.newClient()) {
- OzoneVolume volume = client.getObjectStore()
- .getVolume("volume1");
- OzoneBucket ozoneBucket = volume.getBucket("bucket1");
- createKey(ozoneBucket, "key3", 10, new byte[10]);
- fail("Create key as non-volume and non-bucket owner should fail");
- } catch (Exception ex) {
- LOG.info(ex.getMessage());
+ assertThrows(Exception.class, () -> {
+ OzoneVolume volume = client.getObjectStore().getVolume("volume1");
+ OzoneBucket ozoneBucket = volume.getBucket("bucket1");
+ createKey(ozoneBucket, "key3", 10, new byte[10]);
+ }, "Create key as non-volume and non-bucket owner should fail");
}
//Key Delete - should fail
try (OzoneClient client = cluster.newClient()) {
- OzoneVolume volume = client.getObjectStore()
- .getVolume("volume1");
- OzoneBucket ozoneBucket = volume.getBucket("bucket1");
- ozoneBucket.deleteKey("key2");
- fail("Delete key as non-volume and non-bucket owner should fail");
- } catch (Exception ex) {
- LOG.info(ex.getMessage());
+ assertThrows(Exception.class, () -> {
+ OzoneVolume volume = client.getObjectStore().getVolume("volume1");
+ OzoneBucket ozoneBucket = volume.getBucket("bucket1");
+ ozoneBucket.deleteKey("key2");
+ }, "Delete key as non-volume and non-bucket owner should fail");
}
//Key Rename - should fail
try (OzoneClient client = cluster.newClient()) {
- OzoneVolume volume = client.getObjectStore()
- .getVolume("volume1");
- OzoneBucket ozoneBucket = volume.getBucket("bucket1");
- ozoneBucket.renameKey("key2", "key4");
- fail("Rename key as non-volume and non-bucket owner should fail");
- } catch (Exception ex) {
- LOG.info(ex.getMessage());
+ assertThrows(Exception.class, () -> {
+ OzoneVolume volume = client.getObjectStore().getVolume("volume1");
+ OzoneBucket ozoneBucket = volume.getBucket("bucket1");
+ ozoneBucket.renameKey("key2", "key4");
+ }, "Rename key as non-volume and non-bucket owner should fail");
}
//List Keys - should fail
try (OzoneClient client = cluster.newClient()) {
- OzoneVolume volume = client.getObjectStore()
- .getVolume("volume1");
- OzoneBucket ozoneBucket = volume.getBucket("bucket1");
- ozoneBucket.listKeys("key");
- fail("List keys as non-volume and non-bucket owner should fail");
- } catch (Exception ex) {
- LOG.info(ex.getMessage());
+ assertThrows(Exception.class, () -> {
+ OzoneVolume volume = client.getObjectStore().getVolume("volume1");
+ OzoneBucket ozoneBucket = volume.getBucket("bucket1");
+ ozoneBucket.listKeys("key");
+ }, "List keys as non-volume and non-bucket owner should fail");
}
//Get Acls - should fail
try (OzoneClient client = cluster.newClient()) {
- OzoneVolume volume = client.getObjectStore()
- .getVolume("volume1");
- OzoneBucket ozoneBucket = volume.getBucket("bucket1");
- ozoneBucket.getAcls();
- fail("Get Acls as non-volume and non-bucket owner should fail");
- } catch (Exception ex) {
- LOG.info(ex.getMessage());
+ assertThrows(Exception.class, () -> {
+ OzoneVolume volume = client.getObjectStore().getVolume("volume1");
+ OzoneBucket ozoneBucket = volume.getBucket("bucket1");
+ ozoneBucket.getAcls();
+ }, "Get Acls as non-volume and non-bucket owner should fail");
}
+
//Add Acls - should fail
try (OzoneClient client = cluster.newClient()) {
- OzoneVolume volume = client.getObjectStore()
- .getVolume("volume1");
- OzoneBucket ozoneBucket = volume.getBucket("bucket1");
- OzoneAcl acl = new OzoneAcl(USER, "testuser1",
- IAccessAuthorizer.ACLType.ALL, DEFAULT);
- ozoneBucket.addAcl(acl);
- fail("Add Acls as non-volume and non-bucket owner should fail");
- } catch (Exception ex) {
- LOG.info(ex.getMessage());
+ assertThrows(Exception.class, () -> {
+ OzoneVolume volume = client.getObjectStore().getVolume("volume1");
+ OzoneBucket ozoneBucket = volume.getBucket("bucket1");
+ OzoneAcl acl = new OzoneAcl(USER, "testuser1",
+ IAccessAuthorizer.ACLType.ALL, DEFAULT);
+ ozoneBucket.addAcl(acl);
+ }, "Add Acls as non-volume and non-bucket owner should fail");
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
index f70e223dd5..67ab3169b6 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerImpl.java
@@ -132,7 +132,6 @@ import static
org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assumptions.assumeFalse;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.anyInt;
@@ -381,13 +380,11 @@ public class TestKeyManagerImpl {
keyArgs.setLocationInfoList(
keySession.getKeyInfo().getLatestVersionLocations().getLocationList());
writeClient.commitKey(keyArgs, keySession.getId());
- try {
- writeClient.createDirectory(keyArgs);
- fail("Creation should fail for directory.");
- } catch (OMException e) {
- assertEquals(e.getResult(),
- OMException.ResultCodes.FILE_ALREADY_EXISTS);
- }
+ OmKeyArgs finalKeyArgs = keyArgs;
+ OMException e =
+ assertThrows(OMException.class, () ->
writeClient.createDirectory(finalKeyArgs),
+ "Creation should fail for directory.");
+ assertEquals(e.getResult(), OMException.ResultCodes.FILE_ALREADY_EXISTS);
// create directory where parent is root
keyName = RandomStringUtils.randomAlphabetic(5);
@@ -414,13 +411,12 @@ public class TestKeyManagerImpl {
writeClient.commitKey(keyArgs, keySession.getId());
// try to open created key with overWrite flag set to false
- try {
- writeClient.createFile(keyArgs, false, false);
- fail("Open key should fail for non overwrite create");
- } catch (OMException ex) {
- if (ex.getResult() != OMException.ResultCodes.FILE_ALREADY_EXISTS) {
- throw ex;
- }
+ OmKeyArgs finalKeyArgs = keyArgs;
+ OMException ex =
+ assertThrows(OMException.class, () ->
writeClient.createFile(finalKeyArgs, false, false),
+ "Open key should fail for non overwrite create");
+ if (ex.getResult() != OMException.ResultCodes.FILE_ALREADY_EXISTS) {
+ throw ex;
}
// create file should pass with overwrite flag set to true
@@ -437,13 +433,12 @@ public class TestKeyManagerImpl {
keyArgs = createBuilder()
.setKeyName(keyName)
.build();
- try {
- writeClient.createFile(keyArgs, false, false);
- fail("Open file should fail for non recursive write");
- } catch (OMException ex) {
- if (ex.getResult() != OMException.ResultCodes.DIRECTORY_NOT_FOUND) {
- throw ex;
- }
+ OmKeyArgs finalKeyArgs1 = keyArgs;
+ ex =
+ assertThrows(OMException.class, () ->
writeClient.createFile(finalKeyArgs1, false, false),
+ "Open file should fail for non recursive write");
+ if (ex.getResult() != OMException.ResultCodes.DIRECTORY_NOT_FOUND) {
+ throw ex;
}
// file create should pass when recursive flag is set to true
@@ -458,13 +453,11 @@ public class TestKeyManagerImpl {
keyArgs = createBuilder()
.setKeyName("")
.build();
- try {
- writeClient.createFile(keyArgs, true, true);
- fail("Open file should fail for non recursive write");
- } catch (OMException ex) {
- if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) {
- throw ex;
- }
+ OmKeyArgs finalKeyArgs2 = keyArgs;
+ ex = assertThrows(OMException.class, () ->
writeClient.createFile(finalKeyArgs2, true, true),
+ "Open file should fail for non recursive write");
+ if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) {
+ throw ex;
}
}
@@ -739,13 +732,12 @@ public class TestKeyManagerImpl {
.build();
// lookup for a non-existent file
- try {
- keyManager.lookupFile(keyArgs, null);
- fail("Lookup file should fail for non existent file");
- } catch (OMException ex) {
- if (ex.getResult() != OMException.ResultCodes.FILE_NOT_FOUND) {
- throw ex;
- }
+ OmKeyArgs finalKeyArgs = keyArgs;
+ OMException ex =
+ assertThrows(OMException.class, () ->
keyManager.lookupFile(finalKeyArgs, null),
+ "Lookup file should fail for non existent file");
+ if (ex.getResult() != OMException.ResultCodes.FILE_NOT_FOUND) {
+ throw ex;
}
// create a file
@@ -760,13 +752,11 @@ public class TestKeyManagerImpl {
keyArgs = createBuilder()
.setKeyName("")
.build();
- try {
- keyManager.lookupFile(keyArgs, null);
- fail("Lookup file should fail for a directory");
- } catch (OMException ex) {
- if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) {
- throw ex;
- }
+ OmKeyArgs finalKeyArgs1 = keyArgs;
+ ex = assertThrows(OMException.class, () ->
keyManager.lookupFile(finalKeyArgs1, null),
+ "Lookup file should fail for a directory");
+ if (ex.getResult() != OMException.ResultCodes.NOT_A_FILE) {
+ throw ex;
}
}
@@ -782,13 +772,11 @@ public class TestKeyManagerImpl {
.setSortDatanodesInPipeline(true)
.build();
// lookup for a non-existent key
- try {
- keyManager.lookupKey(keyArgs, resolvedBucket(), null);
- fail("Lookup key should fail for non existent key");
- } catch (OMException ex) {
- if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) {
- throw ex;
- }
+ OMException ex =
+ assertThrows(OMException.class, () -> keyManager.lookupKey(keyArgs,
resolvedBucket(), null),
+ "Lookup key should fail for non existent key");
+ if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) {
+ throw ex;
}
// create a key
@@ -877,13 +865,12 @@ public class TestKeyManagerImpl {
.build();
// lookup for a non-existent key
- try {
- keyManager.lookupKey(keyArgs, resolvedBucket(), null);
- fail("Lookup key should fail for non existent key");
- } catch (OMException ex) {
- if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) {
- throw ex;
- }
+ OmKeyArgs finalKeyArgs = keyArgs;
+ OMException ex =
+ assertThrows(OMException.class, () ->
keyManager.lookupKey(finalKeyArgs, resolvedBucket(), null),
+ "Lookup key should fail for non existent key");
+ if (ex.getResult() != OMException.ResultCodes.KEY_NOT_FOUND) {
+ throw ex;
}
// create a key
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java
index 5349727ff5..2f4df748ee 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStore.java
@@ -35,7 +35,7 @@ import java.io.IOException;
import java.util.UUID;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Tests to verify Object store without prefix enabled.
@@ -220,14 +220,12 @@ public class TestObjectStore {
createLinkBucket(volume, linkBucket2Name, linkBucket3Name);
createLinkBucket(volume, linkBucket3Name, linkBucket1Name);
- try {
- volume.getBucket(linkBucket1Name);
- fail("Should throw Exception due to loop in Link Buckets");
- } catch (OMException oe) {
- // Expected exception
- assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS,
- oe.getResult());
- }
+ OMException oe =
+ assertThrows(OMException.class, () ->
volume.getBucket(linkBucket1Name),
+ "Should throw Exception due to loop in Link Buckets");
+ // Expected exception
+ assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS,
+ oe.getResult());
}
/**
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
index f9a6940246..81c11eb9cd 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
@@ -76,6 +76,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
@@ -251,40 +252,25 @@ public class TestObjectStoreWithFSO {
ozoneBucket.createKey(key, 10).close();
assertFalse(cluster.getOzoneManager().getMetadataManager().isBucketEmpty(
testVolumeName, testBucketName));
-
- try {
- // Try to delete the bucket while a key is present under it.
- ozoneVolume.deleteBucket(testBucketName);
- fail("Bucket Deletion should fail, since bucket is not empty.");
- } catch (IOException ioe) {
- // Do nothing
- }
+ // Try to delete the bucket while a key is present under it.
+ assertThrows(IOException.class, () ->
ozoneVolume.deleteBucket(testBucketName),
+ "Bucket Deletion should fail, since bucket is not empty.");
// Delete the key (this only deletes the file)
ozoneBucket.deleteKey(key);
assertFalse(cluster.getOzoneManager().getMetadataManager()
.isBucketEmpty(testVolumeName, testBucketName));
- try {
- // Try to delete the bucket while intermediate dirs are present under it.
- ozoneVolume.deleteBucket(testBucketName);
- fail("Bucket Deletion should fail, since bucket still contains " +
- "intermediate directories");
- } catch (IOException ioe) {
- // Do nothing
- }
+ // Try to delete the bucket while intermediate dirs are present under it.
+ assertThrows(IOException.class, () ->
ozoneVolume.deleteBucket(testBucketName),
+ "Bucket Deletion should fail, since bucket still contains intermediate
directories");
// Delete last level of directories.
ozoneBucket.deleteDirectory(parent, true);
assertFalse(cluster.getOzoneManager().getMetadataManager()
.isBucketEmpty(testVolumeName, testBucketName));
- try {
- // Try to delete the bucket while dirs are present under it.
- ozoneVolume.deleteBucket(testBucketName);
- fail("Bucket Deletion should fail, since bucket still contains " +
- "intermediate directories");
- } catch (IOException ioe) {
- // Do nothing
- }
+ // Try to delete the bucket while dirs are present under it.
+ assertThrows(IOException.class, () ->
ozoneVolume.deleteBucket(testBucketName),
+ "Bucket Deletion should fail, since bucket still contains intermediate
directories");
// Delete all the intermediate directories
ozoneBucket.deleteDirectory("a/", true);
@@ -331,14 +317,11 @@ public class TestObjectStoreWithFSO {
data.length());
// open key
- try {
- ozoneBucket.getKey(key);
- fail("Should throw exception as fileName is not visible and its still " +
- "open for writing!");
- } catch (OMException ome) {
- // expected
- assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
- }
+ OMException ome =
+ assertThrows(OMException.class, () -> ozoneBucket.getKey(key),
+ "Should throw exception as fileName is not visible and its still
open for writing!");
+ // expected
+ assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
ozoneOutputStream.close();
@@ -358,13 +341,10 @@ public class TestObjectStoreWithFSO {
ozoneBucket.deleteKey(key);
// get deleted key
- try {
- ozoneBucket.getKey(key);
- fail("Should throw exception as fileName not exists!");
- } catch (OMException ome) {
- // expected
- assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
- }
+ ome = assertThrows(OMException.class, () -> ozoneBucket.getKey(key),
+ "Should throw exception as fileName not exists!");
+ // expected
+ assertEquals(ome.getResult(), OMException.ResultCodes.KEY_NOT_FOUND);
// after key delete
verifyKeyInFileTable(fileTable, fileName, dirPathC.getObjectID(), true);
@@ -658,13 +638,10 @@ public class TestObjectStoreWithFSO {
bucket.renameKey(fromKeyName, toKeyName);
// Lookup for old key should fail.
- try {
- bucket.getKey(fromKeyName);
- fail("Lookup for old from key name should fail!");
- } catch (OMException ome) {
- assertEquals(KEY_NOT_FOUND, ome.getResult());
- }
-
+ OMException e =
+ assertThrows(OMException.class, () -> bucket.getKey(fromKeyName),
+ "Lookup for old from key name should fail!");
+ assertEquals(KEY_NOT_FOUND, e.getResult());
OzoneKey key = bucket.getKey(toKeyName);
assertEquals(toKeyName, key.getName());
}
@@ -707,13 +684,10 @@ public class TestObjectStoreWithFSO {
OzoneBucket bucket = volume.getBucket(bucketName);
createTestKey(bucket, keyName1, value);
createTestKey(bucket, keyName2, value);
-
- try {
- bucket.renameKey(keyName1, keyName2);
- fail("Should throw exception as destin key already exists!");
- } catch (OMException e) {
- assertEquals(KEY_ALREADY_EXISTS, e.getResult());
- }
+ OMException e =
+ assertThrows(OMException.class, () -> bucket.renameKey(keyName1,
keyName2),
+ "Should throw exception as destin key already exists!");
+ assertEquals(KEY_ALREADY_EXISTS, e.getResult());
}
@Test
@@ -770,13 +744,11 @@ public class TestObjectStoreWithFSO {
}
private void assertKeyRenamedEx(OzoneBucket bucket, String keyName)
- throws Exception {
- try {
- bucket.getKey(keyName);
- fail("Should throw KeyNotFound as the key got renamed!");
- } catch (OMException ome) {
- assertEquals(KEY_NOT_FOUND, ome.getResult());
- }
+ throws Exception {
+ OMException ome =
+ assertThrows(OMException.class, () -> bucket.getKey(keyName),
+ "Should throw KeyNotFound as the key got renamed!");
+ assertEquals(KEY_NOT_FOUND, ome.getResult());
}
private void createTestKey(OzoneBucket bucket, String keyName,
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
index 0b00f9b578..be2e0a9652 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithLegacyFS.java
@@ -59,6 +59,7 @@ import java.util.UUID;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
/**
@@ -204,14 +205,11 @@ public class TestObjectStoreWithLegacyFS {
omBucketArgs = builder.build();
volume.createBucket(legacyBuckName, omBucketArgs);
bucket = volume.getBucket(legacyBuckName);
-
- try {
- uploadMPUWithDirectoryExists(bucket, keyName);
- fail("Must throw error as there is " +
- "already directory in the given path");
- } catch (OMException ome) {
- assertEquals(OMException.ResultCodes.NOT_A_FILE, ome.getResult());
- }
+ OzoneBucket finalBucket = bucket;
+ OMException ome =
+ assertThrows(OMException.class, () ->
uploadMPUWithDirectoryExists(finalBucket, keyName),
+ "Must throw error as there is " + "already directory in the given
path");
+ assertEquals(OMException.ResultCodes.NOT_A_FILE, ome.getResult());
}
private OmMultipartUploadCompleteInfo uploadMPUWithDirectoryExists(
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
index 89acf321e3..f44a5cbea8 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerConfiguration.java
@@ -377,15 +377,10 @@ public class TestOzoneManagerConfiguration {
String omServiceId = "service1";
conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
// Deliberately skip OZONE_OM_NODES_KEY and OZONE_OM_ADDRESS_KEY config
-
- try {
- startCluster();
- fail("Should have failed to start the cluster!");
- } catch (OzoneIllegalArgumentException e) {
- // Expect error message
- assertTrue(e.getMessage().contains(
- "List of OM Node ID's should be specified"));
- }
+ OzoneIllegalArgumentException e =
+ assertThrows(OzoneIllegalArgumentException.class, () ->
startCluster());
+ // Expect error message
+ assertTrue(e.getMessage().contains("List of OM Node ID's should be
specified"));
}
/**
@@ -407,15 +402,9 @@ public class TestOzoneManagerConfiguration {
conf.set(OMConfigKeys.OZONE_OM_SERVICE_IDS_KEY, omServiceId);
conf.set(omNodesKey, omNodesKeyValue);
// Deliberately skip OZONE_OM_ADDRESS_KEY config
-
- try {
- startCluster();
- fail("Should have failed to start the cluster!");
- } catch (OzoneIllegalArgumentException e) {
- // Expect error message
- assertTrue(e.getMessage().contains(
- "OM RPC Address should be set for all node"));
- }
+ OzoneIllegalArgumentException e =
assertThrows(OzoneIllegalArgumentException.class, () -> startCluster());
+ // Expect error message
+ assertTrue(e.getMessage().contains("OM RPC Address should be set for all
node"));
}
/**
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java
index 3abcba4a9a..16e6ad086c 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumes.java
@@ -47,8 +47,8 @@ import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_VOLUME_LISTALL_ALLOWED;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
@@ -237,20 +237,17 @@ public class TestOzoneManagerListVolumes {
}
assertEquals(5, count);
} else {
- try {
- objectStore.listVolumes("volume");
- fail("listAllVolumes should fail for " + user.getUserName());
- } catch (RuntimeException ex) {
- // Current listAllVolumes throws RuntimeException
- if (ex.getCause() instanceof OMException) {
- // Expect PERMISSION_DENIED
- if (((OMException) ex.getCause()).getResult() !=
- OMException.ResultCodes.PERMISSION_DENIED) {
- throw ex;
- }
- } else {
+ RuntimeException ex =
+ assertThrows(RuntimeException.class, () ->
objectStore.listVolumes("volume"));
+ // Current listAllVolumes throws RuntimeException
+ if (ex.getCause() instanceof OMException) {
+ // Expect PERMISSION_DENIED
+ if (((OMException) ex.getCause()).getResult() !=
+ OMException.ResultCodes.PERMISSION_DENIED) {
throw ex;
}
+ } else {
+ throw ex;
}
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java
index 4699cbb702..e19ecf1a9d 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestRecursiveAclWithFSO.java
@@ -52,8 +52,8 @@ import static
org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_AUTHORIZER_CLASS
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
import static org.apache.hadoop.ozone.security.acl.OzoneObj.StoreType.OZONE;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* Test recursive acl checks for delete and rename for FSO Buckets.
@@ -168,23 +168,17 @@ public class TestRecursiveAclWithFSO {
OzoneBucket ozoneBucket = volume.getBucket("bucket1");
// perform delete
- try {
- ozoneBucket.deleteDirectory("a/b2", true);
- fail("Should throw permission denied !");
- } catch (OMException ome) {
- // expect permission error
- assertEquals(OMException.ResultCodes.PERMISSION_DENIED,
- ome.getResult(), "Permission check failed");
- }
+ OMException e =
+ assertThrows(OMException.class, () ->
ozoneBucket.deleteDirectory("a/b2", true));
+ // expect permission error
+ assertEquals(OMException.ResultCodes.PERMISSION_DENIED,
+ e.getResult(), "Permission check failed");
+
// perform rename
- try {
- ozoneBucket.renameKey("a/b2", "a/b2_renamed");
- fail("Should throw permission denied !");
- } catch (OMException ome) {
- // expect permission error
- assertEquals(OMException.ResultCodes.PERMISSION_DENIED,
- ome.getResult(), "Permission check failed");
- }
+ e = assertThrows(OMException.class, () -> ozoneBucket.renameKey("a/b2",
"a/b2_renamed"));
+ // expect permission error
+ assertEquals(OMException.ResultCodes.PERMISSION_DENIED,
+ e.getResult(), "Permission check failed");
// Test case 2
// Remove acl from directory c2, delete/rename a/b1 should throw
@@ -200,35 +194,23 @@ public class TestRecursiveAclWithFSO {
UserGroupInformation.setLoginUser(user2);
// perform delete
- try {
- ozoneBucket.deleteDirectory("a/b1", true);
- fail("Should throw permission denied !");
- } catch (OMException ome) {
- // expect permission error
- assertEquals(OMException.ResultCodes.PERMISSION_DENIED,
- ome.getResult(), "Permission check failed");
- }
+ e = assertThrows(OMException.class, () ->
ozoneBucket.deleteDirectory("a/b1", true));
+ // expect permission error
+ assertEquals(OMException.ResultCodes.PERMISSION_DENIED,
+ e.getResult(), "Permission check failed");
// perform rename
- try {
- ozoneBucket.renameKey("a/b1", "a/b1_renamed");
- fail("Should throw permission denied !");
- } catch (OMException ome) {
- // expect permission error
- assertEquals(OMException.ResultCodes.PERMISSION_DENIED,
- ome.getResult(), "Permission check failed");
- }
+ e = assertThrows(OMException.class, () -> ozoneBucket.renameKey("a/b1",
"a/b1_renamed"));
+ // expect permission error
+ assertEquals(OMException.ResultCodes.PERMISSION_DENIED,
+ e.getResult(), "Permission check failed");
// Test case 3
// delete b3 and this should throw exception because user2 has no acls
- try {
- ozoneBucket.deleteDirectory("a/b3", true);
- fail("Should throw permission denied !");
- } catch (OMException ome) {
- // expect permission error
- assertEquals(OMException.ResultCodes.PERMISSION_DENIED,
- ome.getResult(), "Permission check failed");
- }
+ e = assertThrows(OMException.class, () ->
ozoneBucket.deleteDirectory("a/b3", true));
+ // expect permission error
+ assertEquals(OMException.ResultCodes.PERMISSION_DENIED,
+ e.getResult(), "Permission check failed");
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
index b6c9ab60ee..ec61970ee2 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/service/TestRangerBGSyncService.java
@@ -77,7 +77,9 @@ import static
org.apache.hadoop.ozone.om.OMMultiTenantManager.OZONE_TENANT_RANGE
import static
org.apache.hadoop.security.authentication.util.KerberosName.DEFAULT_MECHANISM;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.doAnswer;
@@ -487,31 +489,15 @@ public class TestRangerBGSyncService {
// by OzoneManager Multi-Tenancy tables are cleaned up by sync thread
for (String policy : policiesCreated) {
- try {
- final Policy policyRead = accessController.getPolicy(policy);
- fail("The policy should have been deleted: " + policyRead);
- } catch (IOException ex) {
- if (!(ex.getCause() instanceof RangerServiceException)) {
- fail("Expected RangerServiceException, got " +
- ex.getCause().getClass().getSimpleName());
- }
- RangerServiceException rse = (RangerServiceException) ex.getCause();
- assertEquals(404, rse.getStatus().getStatusCode());
- }
+ IOException ex = assertThrows(IOException.class, () ->
accessController.getPolicy(policy));
+ RangerServiceException rse =
assertInstanceOf(RangerServiceException.class, ex.getCause());
+ assertEquals(404, rse.getStatus().getStatusCode());
}
for (String roleName : rolesCreated) {
- try {
- final Role role = accessController.getRole(roleName);
- fail("This role should have been deleted from Ranger: " + role);
- } catch (IOException ex) {
- if (!(ex.getCause() instanceof RangerServiceException)) {
- fail("Expected RangerServiceException, got " +
- ex.getCause().getClass().getSimpleName());
- }
- RangerServiceException rse = (RangerServiceException) ex.getCause();
- assertEquals(400, rse.getStatus().getStatusCode());
- }
+ IOException ex = assertThrows(IOException.class, () ->
accessController.getRole(roleName));
+ RangerServiceException rse =
assertInstanceOf(RangerServiceException.class, ex.getCause());
+ assertEquals(400, rse.getStatus().getStatusCode());
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java
index 5bbdd4feba..c40e2e009b 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneDatanodeShell.java
@@ -36,7 +36,7 @@ import picocli.CommandLine.ParseResult;
import picocli.CommandLine.RunLast;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* This test class specified for testing Ozone datanode shell command.
@@ -90,17 +90,13 @@ public class TestOzoneDatanodeShell {
if (Strings.isNullOrEmpty(expectedError)) {
executeDatanode(hdds, args);
} else {
- try {
- executeDatanode(hdds, args);
- fail("Exception is expected from command execution " +
Arrays.asList(args));
- } catch (Exception ex) {
- if (!Strings.isNullOrEmpty(expectedError)) {
- Throwable exceptionToCheck = ex;
- if (exceptionToCheck.getCause() != null) {
- exceptionToCheck = exceptionToCheck.getCause();
- }
- assertThat(exceptionToCheck.getMessage()).contains(expectedError);
+ Exception ex = assertThrows(Exception.class, () -> executeDatanode(hdds,
args));
+ if (!Strings.isNullOrEmpty(expectedError)) {
+ Throwable exceptionToCheck = ex;
+ if (exceptionToCheck.getCause() != null) {
+ exceptionToCheck = exceptionToCheck.getCause();
}
+ assertThat(exceptionToCheck.getMessage()).contains(expectedError);
}
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
index 6bd3f5f22f..b3c4a523a3 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneShellHA.java
@@ -90,7 +90,6 @@ import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
@@ -270,18 +269,13 @@ public class TestOzoneShellHA {
if (Strings.isNullOrEmpty(expectedError)) {
execute(shell, args);
} else {
- try {
- execute(shell, args);
- fail("Exception is expected from command execution " + Arrays
- .asList(args));
- } catch (Exception ex) {
- if (!Strings.isNullOrEmpty(expectedError)) {
- Throwable exceptionToCheck = ex;
- if (exceptionToCheck.getCause() != null) {
- exceptionToCheck = exceptionToCheck.getCause();
- }
- assertThat(exceptionToCheck.getMessage()).contains(expectedError);
+ Exception ex = assertThrows(Exception.class, () -> execute(shell, args));
+ if (!Strings.isNullOrEmpty(expectedError)) {
+ Throwable exceptionToCheck = ex;
+ if (exceptionToCheck.getCause() != null) {
+ exceptionToCheck = exceptionToCheck.getCause();
}
+ assertThat(exceptionToCheck.getMessage()).contains(expectedError);
}
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
index 8b9866aef4..a9ad4b4e38 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/shell/TestOzoneTenantShell.java
@@ -68,7 +68,7 @@ import static
org.apache.hadoop.ozone.om.OMMultiTenantManagerImpl.OZONE_OM_TENAN
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Integration test for Ozone tenant shell command. HA enabled.
@@ -254,17 +254,13 @@ public class TestOzoneTenantShell {
if (Strings.isNullOrEmpty(expectedError)) {
execute(shell, args);
} else {
- try {
- execute(shell, args);
- fail("Exception is expected from command execution " +
Arrays.asList(args));
- } catch (Exception ex) {
- if (!Strings.isNullOrEmpty(expectedError)) {
- Throwable exceptionToCheck = ex;
- if (exceptionToCheck.getCause() != null) {
- exceptionToCheck = exceptionToCheck.getCause();
- }
- assertThat(exceptionToCheck.getMessage()).contains(expectedError);
+ Exception ex = assertThrows(Exception.class, () -> execute(shell, args));
+ if (!Strings.isNullOrEmpty(expectedError)) {
+ Throwable exceptionToCheck = ex;
+ if (exceptionToCheck.getCause() != null) {
+ exceptionToCheck = exceptionToCheck.getCause();
}
+ assertThat(exceptionToCheck.getMessage()).contains(expectedError);
}
}
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java
index adab44d43f..0079585a85 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOMMultiTenantManager.java
@@ -43,8 +43,8 @@ import static
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.FEAT
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.doCallRealMethod;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -116,12 +116,10 @@ public class TestOMMultiTenantManager {
*/
private void expectConfigCheckToFail(OzoneManager ozoneManager,
OzoneConfiguration conf) {
- try {
- OMMultiTenantManager.checkAndEnableMultiTenancy(ozoneManager, conf);
- fail("Should have thrown RuntimeException");
- } catch (RuntimeException e) {
- assertThat(e.getMessage()).contains("Failed to meet");
- }
+ RuntimeException e =
+ assertThrows(RuntimeException.class,
+ () ->
OMMultiTenantManager.checkAndEnableMultiTenancy(ozoneManager, conf));
+ assertThat(e.getMessage()).contains("Failed to meet");
}
/**
@@ -176,12 +174,9 @@ public class TestOMMultiTenantManager {
*/
private void expectWriteRequestToFail(OzoneManager om, OMRequest omRequest)
throws IOException {
- try {
- OzoneManagerRatisUtils.createClientRequest(omRequest, om);
- fail("Should have thrown OMException");
- } catch (OMException e) {
- assertEquals(FEATURE_NOT_ENABLED, e.getResult());
- }
+ OMException e =
+ assertThrows(OMException.class, () ->
OzoneManagerRatisUtils.createClientRequest(omRequest, om));
+ assertEquals(FEATURE_NOT_ENABLED, e.getResult());
}
/**
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java
index 966ef4e985..f72a687d6a 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/multitenant/TestMultiTenantAccessController.java
@@ -47,8 +47,8 @@ import static
org.apache.hadoop.ozone.om.OMMultiTenantManager.OZONE_TENANT_RANGE
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
/**
* To test MultiTenantAccessController with Ranger Client.
@@ -166,12 +166,7 @@ public class TestMultiTenantAccessController {
assertEquals(prevPolicyVersion + 2L, currPolicyVersion);
// get to check it is deleted.
- try {
- controller.getPolicy(policyName);
- fail("Expected exception for missing policy.");
- } catch (Exception ex) {
- // Expected since policy is not there.
- }
+ assertThrows(Exception.class, () -> controller.getPolicy(policyName));
}
@Test
@@ -194,12 +189,7 @@ public class TestMultiTenantAccessController {
.setName(policyName)
.addVolume(volumeName + "2")
.build();
- try {
- controller.createPolicy(sameNamePolicy);
- fail("Expected exception for duplicate policy.");
- } catch (Exception ex) {
- // Expected since a policy with the same name should not be allowed.
- }
+ assertThrows(Exception.class, () ->
controller.createPolicy(sameNamePolicy));
// Create a policy with different name but same resource.
// Check for error.
@@ -208,12 +198,7 @@ public class TestMultiTenantAccessController {
.setName(policyName + "2")
.addVolume(volumeName)
.build();
- try {
- controller.createPolicy(sameResourcePolicy);
- fail("Expected exception for duplicate policy.");
- } catch (Exception ex) {
- // Expected since a policy with the same resource should not be allowed.
- }
+ assertThrows(Exception.class, () ->
controller.createPolicy(sameResourcePolicy));
// delete policy.
controller.deletePolicy(policyName);
@@ -369,12 +354,7 @@ public class TestMultiTenantAccessController {
// delete role.
controller.deleteRole(roleName);
// get to check it is deleted.
- try {
- controller.getRole(roleName);
- fail("Expected exception for missing role.");
- } catch (Exception ex) {
- // Expected since policy is not there.
- }
+ assertThrows(Exception.class, () -> controller.getRole(roleName));
}
@Test
@@ -393,13 +373,7 @@ public class TestMultiTenantAccessController {
.setName(roleName)
.setDescription(OZONE_TENANT_RANGER_ROLE_DESCRIPTION)
.build();
- try {
- controller.createRole(sameNameRole);
- fail("Expected exception for duplicate role.");
- } catch (Exception ex) {
- // Expected since a policy with the same name should not be allowed.
- }
-
+ assertThrows(Exception.class, () -> controller.createRole(sameNameRole));
// delete role.
controller.deleteRole(roleName);
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
index 181f434378..93997826bf 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerStateMachine.java
@@ -49,7 +49,7 @@ import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertSame;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -180,19 +180,15 @@ public class TestOzoneManagerStateMachine {
prepareState.getState().getStatus());
// Submitting a write request should now fail.
- try {
- ozoneManagerStateMachine.preAppendTransaction(
- mockTransactionContext(createKeyRequest));
- fail("Expected StateMachineException to be thrown when " +
- "submitting write request while prepared.");
- } catch (StateMachineException smEx) {
- assertFalse(smEx.leaderShouldStepDown());
-
- Throwable cause = smEx.getCause();
- assertInstanceOf(OMException.class, cause);
- assertEquals(((OMException) cause).getResult(),
- OMException.ResultCodes.NOT_SUPPORTED_OPERATION_WHEN_PREPARED);
- }
+    StateMachineException smEx =
+        assertThrows(StateMachineException.class,
+            () -> ozoneManagerStateMachine.preAppendTransaction(mockTransactionContext(createKeyRequest)),
+            "Expected StateMachineException to be thrown when submitting write request while prepared.");
+    assertFalse(smEx.leaderShouldStepDown());
+
+    Throwable cause = smEx.getCause();
+    OMException omException = assertInstanceOf(OMException.class, cause);
+    assertEquals(omException.getResult(),
+        OMException.ResultCodes.NOT_SUPPORTED_OPERATION_WHEN_PREPARED);
// Should be able to prepare again without issue.
submittedTrx = mockTransactionContext(prepareRequest);
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java
index d6913cb234..bb3e393005 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestBucketLayoutAwareOMKeyFactory.java
@@ -35,6 +35,7 @@ import static
org.apache.hadoop.ozone.om.request.BucketLayoutAwareOMKeyRequestFa
import static
org.apache.hadoop.ozone.om.request.BucketLayoutAwareOMKeyRequestFactory.OM_KEY_REQUEST_CLASSES;
import static
org.apache.hadoop.ozone.om.request.BucketLayoutAwareOMKeyRequestFactory.addRequestClass;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.fail;
/**
@@ -135,21 +136,17 @@ public class TestBucketLayoutAwareOMKeyFactory {
addRequestClass(Type.PurgeDirectories,
OMDirectoriesPurgeRequestWithFSO.class,
BucketLayout.FILE_SYSTEM_OPTIMIZED);
- try {
- // This should fail, since this class does not have a valid constructor -
- // one that takes an OMRequest and a BucketLayout as parameters.
- getRequestInstanceFromMap(
- OMRequest.newBuilder()
- .setCmdType(Type.PurgeKeys)
- .setClientId("xyz")
- .build(),
- getKey(Type.PurgeDirectories, BucketLayout.FILE_SYSTEM_OPTIMIZED),
- BucketLayout.FILE_SYSTEM_OPTIMIZED);
- fail("No exception thrown for invalid OMKeyRequest class");
- } catch (NoSuchMethodException ex) {
- // expected exception.
- LOG.info("Expected exception thrown for invalid OMKeyRequest class", ex);
- }
+ // This should fail, since this class does not have a valid constructor -
+ // one that takes an OMRequest and a BucketLayout as parameters.
+ assertThrows(NoSuchMethodException.class,
+ () -> getRequestInstanceFromMap(
+ OMRequest.newBuilder()
+ .setCmdType(Type.PurgeKeys)
+ .setClientId("xyz")
+ .build(),
+ getKey(Type.PurgeDirectories, BucketLayout.FILE_SYSTEM_OPTIMIZED),
+ BucketLayout.FILE_SYSTEM_OPTIMIZED),
+ "No exception thrown for invalid OMKeyRequest class");
}
/**
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java
index ae421470af..1b38076b1e 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestNormalizePaths.java
@@ -25,7 +25,7 @@ import org.junit.jupiter.api.Test;
import static
org.apache.hadoop.ozone.om.request.OMClientRequest.validateAndNormalizeKey;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Class to test normalize paths.
@@ -75,12 +75,11 @@ public class TestNormalizePaths {
}
private void checkInvalidPath(String keyName) {
- try {
- validateAndNormalizeKey(true, keyName);
- fail("checkInvalidPath failed for path " + keyName);
- } catch (OMException ex) {
- assertThat(ex.getMessage()).contains("Invalid KeyPath");
- }
+ OMException ex =
+ assertThrows(OMException.class,
+ () -> validateAndNormalizeKey(true, keyName),
+ "checkInvalidPath failed for path " + keyName);
+ assertThat(ex.getMessage()).contains("Invalid KeyPath");
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
index b765e90281..12d9d02a72 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCreateRequest.java
@@ -69,14 +69,12 @@ import static
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.
import static
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.OK;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
import static org.junit.jupiter.api.Assertions.assertSame;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.when;
/**
@@ -802,18 +800,13 @@ public class TestOMKeyCreateRequest extends
TestOMKeyRequest {
private void checkNotAValidPath(String keyName) {
OMRequest omRequest = createKeyRequest(false, 0, keyName);
OMKeyCreateRequest omKeyCreateRequest = getOMKeyCreateRequest(omRequest);
-
- try {
- omKeyCreateRequest.preExecute(ozoneManager);
- fail("checkNotAValidPath failed for path" + keyName);
- } catch (IOException ex) {
- OMException omException = assertInstanceOf(OMException.class, ex);
- assertEquals(OMException.ResultCodes.INVALID_KEY_NAME,
- omException.getResult());
- }
-
-
+    OMException ex =
+        assertThrows(OMException.class, () -> omKeyCreateRequest.preExecute(ozoneManager),
+            "checkNotAValidPath failed for path" + keyName);
+ assertEquals(OMException.ResultCodes.INVALID_KEY_NAME,
+ ex.getResult());
}
+
private void checkNotAFile(String keyName) throws Exception {
OMRequest omRequest = createKeyRequest(false, 0, keyName);
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
index 3bcf75fc6d..fdb332f660 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/validation/TestRequestValidations.java
@@ -158,11 +158,8 @@ public class TestRequestValidations {
public void testPreProcessorExceptionHandling() throws Exception {
ValidationContext ctx = of(aFinalizedVersionManager(), metadataManager);
RequestValidations validations = loadValidations(ctx);
-
- try {
- validations.validateRequest(aDeleteKeysRequest(olderClientVersion()));
- fail("ServiceException was expected but was not thrown.");
- } catch (Exception ignored) { }
+ assertThrows(Exception.class,
+ () ->
validations.validateRequest(aDeleteKeysRequest(olderClientVersion())));
validationListener.assertNumOfEvents(1);
validationListener.assertExactListOfValidatorsCalled(
@@ -173,12 +170,8 @@ public class TestRequestValidations {
public void testPostProcessorExceptionHandling() {
ValidationContext ctx = of(aFinalizedVersionManager(), metadataManager);
RequestValidations validations = loadValidations(ctx);
-
- try {
- validations.validateResponse(
- aDeleteKeysRequest(olderClientVersion()), aDeleteKeysResponse());
- fail("ServiceException was expected but was not thrown.");
- } catch (Exception ignored) { }
+ assertThrows(Exception.class,
+ () ->
validations.validateResponse(aDeleteKeysRequest(olderClientVersion()),
aDeleteKeysResponse()));
validationListener.assertNumOfEvents(1);
validationListener.assertExactListOfValidatorsCalled(
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
index c482ee0473..033ec39342 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMUpgradeFinalizer.java
@@ -45,9 +45,7 @@ import static
org.apache.hadoop.ozone.upgrade.UpgradeFinalizer.Status.STARTING_F
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertInstanceOf;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.anyInt;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;
@@ -195,17 +193,9 @@ public class TestOMUpgradeFinalizer {
setupVersionManagerMockToFinalize(lfs);
OMUpgradeFinalizer finalizer = new OMUpgradeFinalizer(versionManager);
- try {
- finalizer.finalize(CLIENT_ID, om);
- fail();
- } catch (Exception e) {
- assertInstanceOf(UpgradeException.class, e);
- assertThat(e.getMessage()).contains(lfs.iterator().next().name());
- assertEquals(
- ((UpgradeException) e).getResult(),
- LAYOUT_FEATURE_FINALIZATION_FAILED
- );
- }
+    UpgradeException e = assertThrows(UpgradeException.class,
+        () -> finalizer.finalize(CLIENT_ID, om));
+    assertThat(e.getMessage()).contains(lfs.iterator().next().name());
+    assertEquals(e.getResult(), LAYOUT_FEATURE_FINALIZATION_FAILED);
if (finalizer.isFinalizationDone()) {
when(versionManager.getUpgradeState()).thenReturn(FINALIZATION_DONE);
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
index 8b220948db..45647c3dff 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/upgrade/TestOMVersionManager.java
@@ -25,8 +25,8 @@ import static
org.apache.hadoop.ozone.upgrade.LayoutFeature.UpgradeActionType.VA
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doCallRealMethod;
import static org.mockito.Mockito.mock;
@@ -65,13 +65,8 @@ public class TestOMVersionManager {
public void testOMLayoutVersionManagerInitError() {
int lV = OMLayoutFeature.values()[OMLayoutFeature.values().length - 1]
.layoutVersion() + 1;
-
- try {
- new OMLayoutVersionManager(lV);
- fail();
- } catch (OMException ex) {
- assertEquals(NOT_SUPPORTED_OPERATION, ex.getResult());
- }
+    OMException ome = assertThrows(OMException.class, () -> new OMLayoutVersionManager(lV));
+    assertEquals(NOT_SUPPORTED_OPERATION, ome.getResult());
}
@Test
diff --git
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
index 6733bce59b..854ac74bd3 100644
---
a/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
+++
b/hadoop-ozone/recon/src/test/java/org/apache/hadoop/ozone/recon/api/TestContainerEndpoint.java
@@ -103,8 +103,8 @@ import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -930,12 +930,9 @@ public class TestContainerEndpoint {
@Test
public void testUnhealthyContainersInvalidState() {
- try {
- containerEndpoint.getUnhealthyContainers("invalid", 1000, 1);
- fail("Expected exception to be raised");
- } catch (WebApplicationException e) {
- assertEquals("HTTP 400 Bad Request", e.getMessage());
- }
+ WebApplicationException e = assertThrows(WebApplicationException.class,
+ () -> containerEndpoint.getUnhealthyContainers("invalid", 1000, 1));
+ assertEquals("HTTP 400 Bad Request", e.getMessage());
}
@Test
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
index 3a3b474315..eedee2855e 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestMultipartUploadComplete.java
@@ -42,7 +42,7 @@ import
org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest.Part;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -181,13 +181,10 @@ public class TestMultipartUploadComplete {
CompleteMultipartUploadRequest completeMultipartUploadRequest = new
CompleteMultipartUploadRequest();
completeMultipartUploadRequest.setPartList(partsList);
- try {
- completeMultipartUpload(key, completeMultipartUploadRequest, uploadID);
- fail("testMultipartInvalidPartOrderError");
- } catch (OS3Exception ex) {
- assertEquals(S3ErrorTable.INVALID_PART_ORDER.getCode(), ex.getCode());
- }
-
+ OS3Exception ex =
+ assertThrows(OS3Exception.class,
+ () -> completeMultipartUpload(key, completeMultipartUploadRequest,
uploadID));
+ assertEquals(S3ErrorTable.INVALID_PART_ORDER.getCode(), ex.getCode());
}
@Test
@@ -218,12 +215,9 @@ public class TestMultipartUploadComplete {
CompleteMultipartUploadRequest completeMultipartUploadRequest = new
CompleteMultipartUploadRequest();
completeMultipartUploadRequest.setPartList(partsList);
- try {
- completeMultipartUpload(key, completeMultipartUploadRequest, uploadID);
- fail("testMultipartInvalidPartError");
- } catch (OS3Exception ex) {
- assertEquals(ex.getCode(), S3ErrorTable.INVALID_PART.getCode());
- }
-
+ OS3Exception ex =
+ assertThrows(OS3Exception.class,
+ () -> completeMultipartUpload(key, completeMultipartUploadRequest,
uploadID));
+ assertEquals(ex.getCode(), S3ErrorTable.INVALID_PART.getCode());
}
}
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
index c79b085fd1..90d490dea0 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUpload.java
@@ -43,7 +43,7 @@ import static
org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -128,17 +128,15 @@ public class TestPartUpload {
@Test
public void testPartUploadWithIncorrectUploadID() throws Exception {
- try {
+ OS3Exception ex = assertThrows(OS3Exception.class, () -> {
String content = "Multipart Upload With Incorrect uploadID";
ByteArrayInputStream body =
new ByteArrayInputStream(content.getBytes(UTF_8));
REST.put(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, content.length(), 1,
"random", body);
- fail("testPartUploadWithIncorrectUploadID failed");
- } catch (OS3Exception ex) {
- assertEquals("NoSuchUpload", ex.getCode());
- assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
- }
+ });
+ assertEquals("NoSuchUpload", ex.getCode());
+ assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
}
@Test
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
index 210b5ffb48..787aa6e877 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPartUploadWithStream.java
@@ -39,8 +39,8 @@ import static
org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertTrue;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -131,16 +131,14 @@ public class TestPartUploadWithStream {
@Test
public void testPartUploadWithIncorrectUploadID() throws Exception {
- try {
+ OS3Exception ex = assertThrows(OS3Exception.class, () -> {
String content = "Multipart Upload With Incorrect uploadID";
ByteArrayInputStream body =
new ByteArrayInputStream(content.getBytes(UTF_8));
REST.put(S3BUCKET, S3KEY, content.length(), 1,
"random", body);
- fail("testPartUploadWithIncorrectUploadID failed");
- } catch (OS3Exception ex) {
- assertEquals("NoSuchUpload", ex.getCode());
- assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
- }
+ });
+ assertEquals("NoSuchUpload", ex.getCode());
+ assertEquals(HTTP_NOT_FOUND, ex.getHttpCode());
}
}
diff --git
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
index 35821f2003..947b0986c8 100644
---
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
+++
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
@@ -57,7 +57,6 @@ import static
org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
import static org.apache.hadoop.ozone.s3.util.S3Utils.urlEncode;
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
-import static org.junit.jupiter.api.Assertions.fail;
import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
import static org.mockito.Mockito.mock;
@@ -265,12 +264,9 @@ public class TestS3GatewayMetrics {
InputStream inputBody = TestBucketAcl.class.getClassLoader()
.getResourceAsStream("userAccessControlList.xml");
-
try {
- bucketEndpoint.put("unknown_bucket", ACL_MARKER, headers,
- inputBody);
- fail();
- } catch (OS3Exception ex) {
+      assertThrows(OS3Exception.class,
+          () -> bucketEndpoint.put("unknown_bucket", ACL_MARKER, headers, inputBody));
} finally {
inputBody.close();
}
diff --git
a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
index e6d0b3208e..47f465383d 100644
---
a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
+++
b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/audit/parser/TestAuditParser.java
@@ -46,7 +46,7 @@ import java.util.List;
import static org.assertj.core.api.Assertions.assertThat;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.junit.jupiter.api.Assertions.assertInstanceOf;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
/**
* Tests AuditParser.
@@ -189,13 +189,10 @@ public class TestAuditParser {
@Test
public void testLoadCommand() {
String[] args1 = new String[]{dbName, "load", LOGS1};
- try {
- execute(args1, "");
- fail("No exception thrown.");
- } catch (Exception e) {
- assertInstanceOf(ArrayIndexOutOfBoundsException.class, e.getCause());
- assertThat(e.getMessage()).contains(": 5");
- }
+ Exception e =
+ assertThrows(Exception.class, () -> execute(args1, ""));
+ assertInstanceOf(ArrayIndexOutOfBoundsException.class, e.getCause());
+ assertThat(e.getMessage()).contains(": 5");
}
/**
diff --git
a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java
b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java
index cf9e868c0f..d44aed70eb 100644
---
a/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java
+++
b/hadoop-ozone/tools/src/test/java/org/apache/hadoop/ozone/scm/TestDecommissionScmSubcommand.java
@@ -28,7 +28,7 @@ import java.util.UUID;
import org.junit.jupiter.api.Test;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.fail;
+import static org.junit.jupiter.api.Assertions.assertThrows;
import static org.mockito.Mockito.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@@ -97,12 +97,9 @@ public class TestDecommissionScmSubcommand {
.thenAnswer(invocation -> (
response));
- try (GenericTestUtils.SystemOutCapturer capture =
- new GenericTestUtils.SystemOutCapturer()) {
- cmd.execute(client);
- fail();
- } catch (IOException ex) {
- assertThat(ex.getMessage()).contains("remove current leader");
+    try (GenericTestUtils.SystemOutCapturer capture =
+        new GenericTestUtils.SystemOutCapturer()) {
+      IOException ioe = assertThrows(IOException.class, () -> cmd.execute(client));
+      assertThat(ioe.getMessage()).contains("remove current leader");
}
}
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]