This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 8ab0d078db HDDS-10215. Speed up some tests that use OmTestManagers
(#6107)
8ab0d078db is described below
commit 8ab0d078db0634b811e42818ff5bee08e3f06e7b
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Tue Jan 30 18:24:06 2024 +0100
HDDS-10215. Speed up some tests that use OmTestManagers (#6107)
---
.../java/org/apache/ozone/test/OzoneTestBase.java | 44 ++++
.../org/apache/hadoop/ozone/om/OmTestManagers.java | 27 ++-
.../hadoop/ozone/om/TestBucketManagerImpl.java | 224 ++++++++++-----------
.../apache/hadoop/ozone/om/TestKeyManagerUnit.java | 107 ++++++----
.../hadoop/ozone/om/TestOmSnapshotManager.java | 47 ++---
.../hadoop/ozone/om/TestSstFilteringService.java | 3 +-
.../apache/hadoop/ozone/om/TestTrashService.java | 3 +-
.../service/TestMultipartUploadCleanupService.java | 64 +++---
.../om/service/TestOpenKeyCleanupService.java | 55 +++--
.../hadoop/ozone/security/acl/TestParentAcl.java | 3 +-
.../hadoop/ozone/security/acl/TestVolumeOwner.java | 3 +-
11 files changed, 310 insertions(+), 270 deletions(-)
diff --git
a/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/OzoneTestBase.java
b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/OzoneTestBase.java
new file mode 100644
index 0000000000..bb675bddaf
--- /dev/null
+++
b/hadoop-hdds/test-utils/src/main/java/org/apache/ozone/test/OzoneTestBase.java
@@ -0,0 +1,44 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.ozone.test;
+
+import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.TestInfo;
+
+import java.lang.reflect.Method;
+
+/**
+ * Base class for Ozone JUnit tests.
+ * Provides test method name, which can be used to create unique items.
+ */
+public abstract class OzoneTestBase {
+
+ private TestInfo info;
+
+ @BeforeEach
+ void storeTestInfo(TestInfo testInfo) {
+ this.info = testInfo;
+ }
+
+ protected String getTestName() {
+ return info.getTestMethod()
+ .map(Method::getName)
+ .orElse("unknown");
+ }
+
+}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java
index 3db3263ec5..43d29c1608 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/OmTestManagers.java
@@ -17,7 +17,9 @@
package org.apache.hadoop.ozone.om;
+import static org.apache.ozone.test.GenericTestUtils.waitFor;
import static org.mockito.Mockito.mock;
+
import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
@@ -28,9 +30,11 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.ozone.client.OzoneClientFactory;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.hdds.security.token.OzoneBlockTokenSecretManager;
+import org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import java.io.IOException;
+import java.util.concurrent.TimeoutException;
/**
* Test utility for creating a dummy OM, the associated
@@ -38,15 +42,15 @@ import java.io.IOException;
*/
public final class OmTestManagers {
- private OzoneManagerProtocol writeClient;
- private OzoneManager om;
- private KeyManager keyManager;
- private OMMetadataManager metadataManager;
+ private final OzoneManagerProtocol writeClient;
+ private final OzoneManager om;
+ private final KeyManager keyManager;
+ private final OMMetadataManager metadataManager;
private KeyProviderCryptoExtension kmsProvider;
- private VolumeManager volumeManager;
- private BucketManager bucketManager;
- private PrefixManager prefixManager;
- private ScmBlockLocationProtocol scmBlockClient;
+ private final VolumeManager volumeManager;
+ private final BucketManager bucketManager;
+ private final PrefixManager prefixManager;
+ private final ScmBlockLocationProtocol scmBlockClient;
public OzoneManager getOzoneManager() {
return om;
@@ -74,14 +78,14 @@ public final class OmTestManagers {
}
public OmTestManagers(OzoneConfiguration conf)
- throws AuthenticationException, IOException {
+ throws AuthenticationException, IOException, InterruptedException, TimeoutException {
this(conf, null, null);
}
public OmTestManagers(OzoneConfiguration conf,
ScmBlockLocationProtocol blockClient,
StorageContainerLocationProtocol containerClient)
- throws AuthenticationException, IOException {
+ throws AuthenticationException, IOException, InterruptedException, TimeoutException {
if (containerClient == null) {
containerClient = mock(StorageContainerLocationProtocol.class);
}
@@ -109,6 +113,9 @@ public final class OmTestManagers {
"secretManager", mock(OzoneBlockTokenSecretManager.class));
om.start();
+ waitFor(() -> om.getOmRatisServer().checkLeaderStatus() == RaftServerStatus.LEADER_AND_READY,
+ 10, 10_000);
+
writeClient = OzoneClientFactory.getRpcClient(conf)
.getObjectStore().getClientProxy().getOzoneManagerClient();
metadataManager = (OmMetadataManagerImpl) HddsWhiteboxTestUtils
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
index 7919b013b1..3844fb45b3 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestBucketManagerImpl.java
@@ -18,7 +18,6 @@ package org.apache.hadoop.ozone.om;
import java.io.File;
import java.io.IOException;
-import java.nio.file.Path;
import java.util.ArrayList;
import java.util.Collections;
@@ -43,9 +42,11 @@ import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
-import org.junit.jupiter.api.AfterEach;
+import org.apache.ozone.test.OzoneTestBase;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.extension.ExtendWith;
import org.junit.jupiter.api.io.TempDir;
@@ -66,40 +67,36 @@ import static org.mockito.Mockito.when;
* This class tests the Bucket Manager Implementation using Mockito.
*/
@ExtendWith(MockitoExtension.class)
-public class TestBucketManagerImpl {
-
- @TempDir
- private Path folder;
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestBucketManagerImpl extends OzoneTestBase {
private OmTestManagers omTestManagers;
private OzoneManagerProtocol writeClient;
- @AfterEach
- public void cleanup() throws Exception {
- OzoneManager om = omTestManagers.getOzoneManager();
- om.stop();
- }
-
- private OzoneConfiguration createNewTestPath() throws IOException {
+ @BeforeAll
+ void setup(@TempDir File folder) throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
- File newFolder = folder.toFile();
- if (!newFolder.exists()) {
- assertTrue(newFolder.mkdirs());
- }
- ServerUtils.setOzoneMetaDirPath(conf, newFolder.toString());
- return conf;
- }
+ ServerUtils.setOzoneMetaDirPath(conf, folder.toString());
- private void createSampleVol() throws IOException, AuthenticationException {
- OzoneConfiguration conf = createNewTestPath();
omTestManagers = new OmTestManagers(conf);
writeClient = omTestManagers.getWriteClient();
+ }
+ @AfterAll
+ void cleanup() throws Exception {
+ omTestManagers.getOzoneManager().stop();
+ }
+
+ public String volumeName() {
+ return getTestName().toLowerCase();
+ }
+
+ private void createSampleVol(String volume) throws IOException {
// This is a simple hack for testing, we just test if the volume via a
// null check, do not parse the value part. So just write some dummy value.
OmVolumeArgs args =
OmVolumeArgs.newBuilder()
- .setVolume("sample-vol")
+ .setVolume(volume)
.setAdminName("bilbo")
.setOwnerName("bilbo")
.build();
@@ -107,25 +104,20 @@ public class TestBucketManagerImpl {
}
@Test
- public void testCreateBucketWithoutVolume() throws Exception {
- OzoneConfiguration conf = createNewTestPath();
- omTestManagers = new OmTestManagers(conf);
- OMException omEx = assertThrows(OMException.class, () -> {
- writeClient = omTestManagers.getWriteClient();
-
- OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
- .setBucketName("bucket-one")
- .build();
- writeClient.createBucket(bucketInfo);
- });
+ void testCreateBucketWithoutVolume() {
+ OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+ .setVolumeName(volumeName())
+ .setBucketName("bucket-one")
+ .build();
+ OMException omEx = assertThrows(OMException.class, () -> writeClient.createBucket(bucketInfo));
assertEquals(ResultCodes.VOLUME_NOT_FOUND, omEx.getResult());
assertEquals("Volume doesn't exist", omEx.getMessage());
}
@Test
- public void testCreateEncryptedBucket() throws Exception {
- createSampleVol();
+ void testCreateEncryptedBucket() throws Exception {
+ String volume = volumeName();
+ createSampleVol(volume);
KeyProviderCryptoExtension kmsProvider = omTestManagers.kmsProviderInit();
String testBekName = "key1";
@@ -138,17 +130,16 @@ public class TestBucketManagerImpl {
BucketManager bucketManager = omTestManagers.getBucketManager();
OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-one")
.setBucketEncryptionKey(new BucketEncryptionKeyInfo.Builder().setKeyName("key1").build())
.build();
writeClient.createBucket(bucketInfo);
- assertNotNull(bucketManager.getBucketInfo("sample-vol",
- "bucket-one"));
+ assertNotNull(bucketManager.getBucketInfo(volume, "bucket-one"));
OmBucketInfo bucketInfoRead =
- bucketManager.getBucketInfo("sample-vol", "bucket-one");
+ bucketManager.getBucketInfo(volume, "bucket-one");
assertEquals(bucketInfoRead.getEncryptionKeyInfo().getKeyName(),
bucketInfo.getEncryptionKeyInfo().getKeyName());
@@ -157,67 +148,62 @@ public class TestBucketManagerImpl {
@Test
public void testCreateBucket() throws Exception {
- createSampleVol();
+ String volume = volumeName();
+ createSampleVol(volume);
BucketManager bucketManager = omTestManagers.getBucketManager();
OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-one")
.build();
writeClient.createBucket(bucketInfo);
- assertNotNull(bucketManager.getBucketInfo("sample-vol",
- "bucket-one"));
+ assertNotNull(bucketManager.getBucketInfo(volume, "bucket-one"));
}
@Test
public void testCreateAlreadyExistingBucket() throws Exception {
- createSampleVol();
+ String volume = volumeName();
+ createSampleVol(volume);
- OMException omEx = assertThrows(OMException.class, () -> {
- OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
- .setBucketName("bucket-one")
- .build();
- writeClient.createBucket(bucketInfo);
- writeClient.createBucket(bucketInfo);
- });
- assertEquals(ResultCodes.BUCKET_ALREADY_EXISTS,
- omEx.getResult());
+ OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
+ .setVolumeName(volume)
+ .setBucketName("bucket-one")
+ .build();
+ writeClient.createBucket(bucketInfo);
+
+ OMException omEx = assertThrows(OMException.class,
+ () -> writeClient.createBucket(bucketInfo));
+ assertEquals(ResultCodes.BUCKET_ALREADY_EXISTS, omEx.getResult());
assertEquals("Bucket already exist", omEx.getMessage());
}
@Test
public void testGetBucketInfoForInvalidBucket() throws Exception {
- createSampleVol();
- OMException exception = assertThrows(OMException.class, () -> {
- BucketManager bucketManager = omTestManagers.getBucketManager();
- bucketManager.getBucketInfo("sample-vol", "bucket-one");
- });
+ String volume = volumeName();
+ createSampleVol(volume);
+
+ BucketManager bucketManager = omTestManagers.getBucketManager();
+
+ OMException exception = assertThrows(OMException.class,
+ () -> bucketManager.getBucketInfo(volume, "bucket-one"));
assertThat(exception.getMessage()).contains("Bucket not found");
- assertEquals(ResultCodes.BUCKET_NOT_FOUND,
- exception.getResult());
+ assertEquals(ResultCodes.BUCKET_NOT_FOUND, exception.getResult());
}
@Test
- public void testGetBucketInfo() throws Exception {
- final String volumeName = "sample-vol";
+ void testGetBucketInfo() throws Exception {
+ final String volumeName = volumeName();
final String bucketName = "bucket-one";
- OzoneConfiguration conf = createNewTestPath();
- omTestManagers = new OmTestManagers(conf);
- writeClient = omTestManagers.getWriteClient();
-
OMMetadataManager metaMgr = omTestManagers.getMetadataManager();
BucketManager bucketManager = omTestManagers.getBucketManager();
// Check exception thrown when volume does not exist
- try {
- bucketManager.getBucketInfo(volumeName, bucketName);
- fail("Should have thrown OMException");
- } catch (OMException omEx) {
- assertEquals(ResultCodes.VOLUME_NOT_FOUND, omEx.getResult(),
- "getBucketInfo() should have thrown " +
- "VOLUME_NOT_FOUND as the parent volume is not created!");
- }
+ OMException omEx = assertThrows(OMException.class,
+ () -> bucketManager.getBucketInfo(volumeName, bucketName));
+ assertEquals(ResultCodes.VOLUME_NOT_FOUND, omEx.getResult(),
+ "getBucketInfo() should have thrown " +
+ "VOLUME_NOT_FOUND as the parent volume is not created!");
+
OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
.setVolumeName(volumeName)
.setBucketName(bucketName)
@@ -235,16 +221,12 @@ public class TestBucketManagerImpl {
writeClient.createVolume(args);
// Create bucket
createBucket(metaMgr, bucketInfo);
+
// Check exception thrown when bucket does not exist
- try {
- bucketManager.getBucketInfo(volumeName, "bucketNotExist");
- fail("Should have thrown OMException");
- } catch (OMException omEx) {
- assertEquals(
- ResultCodes.BUCKET_NOT_FOUND, omEx.getResult(),
- "getBucketInfo() should have thrown BUCKET_NOT_FOUND " +
- "as the parent volume exists but bucket doesn't!");
- }
+ OMException e2 = assertThrows(OMException.class,
+ () -> bucketManager.getBucketInfo(volumeName, "bucketNotExist"));
+ assertEquals(ResultCodes.BUCKET_NOT_FOUND, e2.getResult());
+
OmBucketInfo result = bucketManager.getBucketInfo(volumeName, bucketName);
assertEquals(volumeName, result.getVolumeName());
assertEquals(bucketName, result.getBucketName());
@@ -259,64 +241,68 @@ public class TestBucketManagerImpl {
@Test
public void testSetBucketPropertyChangeStorageType() throws Exception {
+ String volume = volumeName();
+ createSampleVol(volume);
- createSampleVol();
OMMetadataManager metaMgr = omTestManagers.getMetadataManager();
BucketManager bucketManager = omTestManagers.getBucketManager();
OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-one")
.setStorageType(StorageType.DISK)
.build();
createBucket(metaMgr, bucketInfo);
OmBucketInfo result = bucketManager.getBucketInfo(
- "sample-vol", "bucket-one");
+ volume, "bucket-one");
assertEquals(StorageType.DISK,
result.getStorageType());
OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-one")
.setStorageType(StorageType.SSD)
.build();
writeClient.setBucketProperty(bucketArgs);
OmBucketInfo updatedResult = bucketManager.getBucketInfo(
- "sample-vol", "bucket-one");
+ volume, "bucket-one");
assertEquals(StorageType.SSD,
updatedResult.getStorageType());
}
@Test
public void testSetBucketPropertyChangeVersioning() throws Exception {
- createSampleVol();
+ String volume = volumeName();
+ createSampleVol(volume);
BucketManager bucketManager = omTestManagers.getBucketManager();
OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-one")
.setIsVersionEnabled(false)
.build();
writeClient.createBucket(bucketInfo);
OmBucketInfo result = bucketManager.getBucketInfo(
- "sample-vol", "bucket-one");
+ volume, "bucket-one");
assertFalse(result.getIsVersionEnabled());
OmBucketArgs bucketArgs = OmBucketArgs.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-one")
.setIsVersionEnabled(true)
.build();
writeClient.setBucketProperty(bucketArgs);
OmBucketInfo updatedResult = bucketManager.getBucketInfo(
- "sample-vol", "bucket-one");
+ volume, "bucket-one");
assertTrue(updatedResult.getIsVersionEnabled());
}
@Test
public void testDeleteBucket() throws Exception {
- createSampleVol();
+ String volume = volumeName();
+ createSampleVol(volume);
+
BucketManager bucketManager = omTestManagers.getBucketManager();
for (int i = 0; i < 5; i++) {
OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-" + i)
.build();
writeClient.createBucket(bucketInfo);
@@ -324,17 +310,17 @@ public class TestBucketManagerImpl {
for (int i = 0; i < 5; i++) {
assertEquals("bucket-" + i,
bucketManager.getBucketInfo(
- "sample-vol", "bucket-" + i).getBucketName());
+ volume, "bucket-" + i).getBucketName());
}
try {
- writeClient.deleteBucket("sample-vol", "bucket-1");
+ writeClient.deleteBucket(volume, "bucket-1");
assertNotNull(bucketManager.getBucketInfo(
- "sample-vol", "bucket-2"));
+ volume, "bucket-2"));
} catch (IOException ex) {
fail(ex.getMessage());
}
OMException omEx = assertThrows(OMException.class, () -> {
- bucketManager.getBucketInfo("sample-vol", "bucket-1");
+ bucketManager.getBucketInfo(volume, "bucket-1");
});
assertEquals(ResultCodes.BUCKET_NOT_FOUND,
omEx.getResult());
@@ -343,15 +329,17 @@ public class TestBucketManagerImpl {
@Test
public void testDeleteNonEmptyBucket() throws Exception {
- createSampleVol();
+ String volume = volumeName();
+ createSampleVol(volume);
+
OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-one")
.build();
writeClient.createBucket(bucketInfo);
//Create keys in bucket
OmKeyArgs args1 = new OmKeyArgs.Builder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-one")
.setKeyName("key-one")
.setAcls(Collections.emptyList())
@@ -364,7 +352,7 @@ public class TestBucketManagerImpl {
writeClient.commitKey(args1, session1.getId());
OmKeyArgs args2 = new OmKeyArgs.Builder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-one")
.setKeyName("key-two")
.setAcls(Collections.emptyList())
@@ -376,7 +364,7 @@ public class TestBucketManagerImpl {
OpenKeySession session2 = writeClient.openKey(args2);
writeClient.commitKey(args2, session2.getId());
OMException omEx = assertThrows(OMException.class, () -> {
- writeClient.deleteBucket("sample-vol", "bucket-one");
+ writeClient.deleteBucket(volume, "bucket-one");
});
assertEquals(ResultCodes.BUCKET_NOT_EMPTY,
omEx.getResult());
@@ -385,10 +373,12 @@ public class TestBucketManagerImpl {
@Test
public void testLinkedBucketResolution() throws Exception {
- createSampleVol();
+ String volume = volumeName();
+ createSampleVol(volume);
+
ECReplicationConfig ecConfig = new ECReplicationConfig(3, 2);
OmBucketInfo bucketInfo = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("bucket-one")
.setDefaultReplicationConfig(
new DefaultReplicationConfig(
@@ -405,23 +395,23 @@ public class TestBucketManagerImpl {
writeClient.createBucket(bucketInfo);
OmBucketInfo bucketLinkInfo = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("link-one")
- .setSourceVolume("sample-vol")
+ .setSourceVolume(volume)
.setSourceBucket("bucket-one")
.build();
writeClient.createBucket(bucketLinkInfo);
OmBucketInfo bucketLink2 = OmBucketInfo.newBuilder()
- .setVolumeName("sample-vol")
+ .setVolumeName(volume)
.setBucketName("link-two")
- .setSourceVolume("sample-vol")
+ .setSourceVolume(volume)
.setSourceBucket("link-one")
.build();
writeClient.createBucket(bucketLink2);
OmBucketInfo storedLinkBucket =
- writeClient.getBucketInfo("sample-vol", "link-two");
+ writeClient.getBucketInfo(volume, "link-two");
assertNotNull(storedLinkBucket.getDefaultReplicationConfig(),
"Replication config is not set");
assertEquals(ecConfig,
@@ -432,12 +422,12 @@ public class TestBucketManagerImpl {
assertEquals(
"link-two", storedLinkBucket.getBucketName());
assertEquals(
- "sample-vol", storedLinkBucket.getVolumeName());
+ volume, storedLinkBucket.getVolumeName());
assertEquals(
"link-one", storedLinkBucket.getSourceBucket());
assertEquals(
- "sample-vol", storedLinkBucket.getSourceVolume());
+ volume, storedLinkBucket.getSourceVolume());
assertEquals(
bucketInfo.getBucketLayout(),
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
index 3fa8462de1..0a515f6ede 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyManagerUnit.java
@@ -31,6 +31,7 @@ import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.UUID;
+import java.util.concurrent.atomic.AtomicLong;
import org.apache.commons.io.FileUtils;
import org.apache.commons.lang3.RandomUtils;
@@ -71,11 +72,13 @@ import
org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.hadoop.util.Time;
+import org.apache.ozone.test.OzoneTestBase;
import org.apache.ratis.util.ExitUtils;
-import org.junit.jupiter.api.AfterEach;
-import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
+import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
@@ -90,6 +93,7 @@ import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.mockito.Mockito.anySet;
import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.reset;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
@@ -97,7 +101,10 @@ import static org.mockito.Mockito.when;
/**
* Unit test key manager.
*/
-public class TestKeyManagerUnit {
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestKeyManagerUnit extends OzoneTestBase {
+
+ private static final AtomicLong CONTAINER_ID = new AtomicLong();
private OzoneConfiguration configuration;
private OMMetadataManager metadataManager;
@@ -112,12 +119,9 @@ public class TestKeyManagerUnit {
private OzoneManager om;
@BeforeAll
- public static void setup() {
+ void setup() throws Exception {
ExitUtils.disableSystemExit();
- }
-
- @BeforeEach
- public void init() throws Exception {
+
configuration = new OzoneConfiguration();
testDir = GenericTestUtils.getRandomizedTestDir();
configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS,
@@ -131,10 +135,15 @@ public class TestKeyManagerUnit {
metadataManager = omTestManagers.getMetadataManager();
keyManager = (KeyManagerImpl)omTestManagers.getKeyManager();
writeClient = omTestManagers.getWriteClient();
+ }
+
+ @BeforeEach
+ void init() {
+ reset(blockClient, containerClient);
startDate = Instant.ofEpochMilli(Time.now());
}
- @AfterEach
+ @AfterAll
public void cleanup() throws Exception {
om.stop();
FileUtils.deleteDirectory(testDir);
@@ -143,34 +152,40 @@ public class TestKeyManagerUnit {
@Test
public void listMultipartUploadPartsWithZeroUpload() throws IOException {
//GIVEN
- createBucket(metadataManager, "vol1", "bucket1");
+ final String volume = volumeName();
+ createBucket(metadataManager, volume, "bucket1");
OmMultipartInfo omMultipartInfo =
- initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1");
+ initMultipartUpload(writeClient, volume, "bucket1", "dir/key1");
//WHEN
OmMultipartUploadListParts omMultipartUploadListParts = keyManager
- .listParts("vol1", "bucket1", "dir/key1", omMultipartInfo.getUploadID(),
+ .listParts(volume, "bucket1", "dir/key1", omMultipartInfo.getUploadID(),
0, 10);
assertEquals(0,
omMultipartUploadListParts.getPartInfoList().size());
}
+ private String volumeName() {
+ return getTestName();
+ }
+
@Test
public void listMultipartUploads() throws IOException {
//GIVEN
- createBucket(metadataManager, "vol1", "bucket1");
- createBucket(metadataManager, "vol1", "bucket2");
+ final String volume = volumeName();
+ createBucket(metadataManager, volume, "bucket1");
+ createBucket(metadataManager, volume, "bucket2");
- initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1");
- initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key2");
- initMultipartUpload(writeClient, "vol1", "bucket2", "dir/key1");
+ initMultipartUpload(writeClient, volume, "bucket1", "dir/key1");
+ initMultipartUpload(writeClient, volume, "bucket1", "dir/key2");
+ initMultipartUpload(writeClient, volume, "bucket2", "dir/key1");
//WHEN
OmMultipartUploadList omMultipartUploadList =
- keyManager.listMultipartUploads("vol1", "bucket1", "");
+ keyManager.listMultipartUploads(volume, "bucket1", "");
//THEN
List<OmMultipartUpload> uploads = omMultipartUploadList.getUploads();
@@ -188,8 +203,8 @@ public class TestKeyManagerUnit {
@Test
public void listMultipartUploadsWithFewEntriesInCache() throws IOException {
- String volume = UUID.randomUUID().toString();
- String bucket = UUID.randomUUID().toString();
+ String volume = volumeName();
+ String bucket = "bucket";
//GIVEN
createBucket(metadataManager, volume, bucket);
@@ -276,20 +291,21 @@ public class TestKeyManagerUnit {
public void listMultipartUploadsWithPrefix() throws IOException {
//GIVEN
- createBucket(metadataManager, "vol1", "bucket1");
- createBucket(metadataManager, "vol1", "bucket2");
+ final String volumeName = volumeName();
+ createBucket(metadataManager, volumeName, "bucket1");
+ createBucket(metadataManager, volumeName, "bucket2");
- initMultipartUpload(writeClient, "vol1", "bucket1", "dip/key1");
+ initMultipartUpload(writeClient, volumeName, "bucket1", "dip/key1");
- initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key1");
- initMultipartUpload(writeClient, "vol1", "bucket1", "dir/key2");
- initMultipartUpload(writeClient, "vol1", "bucket1", "key3");
+ initMultipartUpload(writeClient, volumeName, "bucket1", "dir/key1");
+ initMultipartUpload(writeClient, volumeName, "bucket1", "dir/key2");
+ initMultipartUpload(writeClient, volumeName, "bucket1", "key3");
- initMultipartUpload(writeClient, "vol1", "bucket2", "dir/key1");
+ initMultipartUpload(writeClient, volumeName, "bucket2", "dir/key1");
//WHEN
OmMultipartUploadList omMultipartUploadList =
- keyManager.listMultipartUploads("vol1", "bucket1", "dir");
+ keyManager.listMultipartUploads(volumeName, "bucket1", "dir");
//THEN
List<OmMultipartUpload> uploads = omMultipartUploadList.getUploads();
@@ -357,7 +373,7 @@ public class TestKeyManagerUnit {
final DatanodeDetails dn2 = MockDatanodeDetails.randomDatanodeDetails();
final DatanodeDetails dn3 = MockDatanodeDetails.randomDatanodeDetails();
final DatanodeDetails dn4 = MockDatanodeDetails.randomDatanodeDetails();
- final long containerID = 1L;
+ final long containerID = CONTAINER_ID.incrementAndGet();
Set<Long> containerIDs = newHashSet(containerID);
final Pipeline pipeline1 = Pipeline.newBuilder()
@@ -388,18 +404,19 @@ public class TestKeyManagerUnit {
singletonList(new ContainerWithPipeline(ci, pipeline1)),
singletonList(new ContainerWithPipeline(ci, pipeline2)));
- insertVolume("volumeOne");
+ final String volume = volumeName();
+ insertVolume(volume);
- insertBucket("volumeOne", "bucketOne");
+ insertBucket(volume, "bucketOne");
BlockID blockID1 = new BlockID(containerID, 1L);
- insertKey(null, "volumeOne", "bucketOne", "keyOne", blockID1);
+ insertKey(null, volume, "bucketOne", "keyOne", blockID1);
BlockID blockID2 = new BlockID(containerID, 2L);
- insertKey(null, "volumeOne", "bucketOne", "keyTwo", blockID2);
+ insertKey(null, volume, "bucketOne", "keyTwo", blockID2);
// 1st call to get key1.
OmKeyArgs keyArgs = new Builder()
- .setVolumeName("volumeOne")
+ .setVolumeName(volume)
.setBucketName("bucketOne")
.setKeyName("keyOne")
.build();
@@ -415,7 +432,7 @@ public class TestKeyManagerUnit {
// subsequent call to key2 in same container sound result no scm calls.
keyArgs = new Builder()
- .setVolumeName("volumeOne")
+ .setVolumeName(volume)
.setBucketName("bucketOne")
.setKeyName("keyTwo")
.build();
@@ -431,7 +448,7 @@ public class TestKeyManagerUnit {
// Yet, another call with forceCacheUpdate should trigger a call to SCM.
keyArgs = new Builder()
- .setVolumeName("volumeOne")
+ .setVolumeName(volume)
.setBucketName("bucketOne")
.setKeyName("keyTwo")
.setForceUpdateContainerCacheFromSCM(true)
@@ -492,15 +509,16 @@ public class TestKeyManagerUnit {
when(containerClient.getContainerWithPipelineBatch(containerIDs))
.thenReturn(cps);
- insertVolume("volumeOne");
+ final String volume = volumeName();
+ insertVolume(volume);
- insertBucket("volumeOne", "bucketOne");
+ insertBucket(volume, "bucketOne");
- insertKey(pipelineOne, "volumeOne", "bucketOne", "keyOne",
+ insertKey(pipelineOne, volume, "bucketOne", "keyOne",
new BlockID(1L, 1L));
final OmKeyArgs.Builder keyArgs = new OmKeyArgs.Builder()
- .setVolumeName("volumeOne")
+ .setVolumeName(volume)
.setBucketName("bucketOne")
.setKeyName("keyOne");
@@ -566,7 +584,7 @@ public class TestKeyManagerUnit {
@Test
public void listStatus() throws Exception {
- String volume = "vol";
+ String volume = volumeName();
String bucket = "bucket";
String keyPrefix = "key";
String client = "client.host";
@@ -584,19 +602,20 @@ public class TestKeyManagerUnit {
Set<Long> containerIDs = new HashSet<>();
List<ContainerWithPipeline> containersWithPipeline = new ArrayList<>();
for (long i = 1; i <= 10; i++) {
+ final long containerID = CONTAINER_ID.incrementAndGet();
final OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder()
- .setBlockID(new BlockID(i, 1L))
+ .setBlockID(new BlockID(containerID, 1L))
.setPipeline(pipeline)
.setOffset(0)
.setLength(256000)
.build();
ContainerInfo containerInfo = new ContainerInfo.Builder()
- .setContainerID(i)
+ .setContainerID(containerID)
.build();
containersWithPipeline.add(
new ContainerWithPipeline(containerInfo, pipeline));
- containerIDs.add(i);
+ containerIDs.add(containerID);
OmKeyInfo keyInfo = new OmKeyInfo.Builder()
.setVolumeName(volume)
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
index 79e201b429..e1ae8f57d1 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestOmSnapshotManager.java
@@ -19,7 +19,6 @@
package org.apache.hadoop.ozone.om;
-import org.apache.commons.io.FileUtils;
import org.apache.hadoop.hdds.HddsConfigKeys;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
@@ -31,9 +30,12 @@ import org.apache.hadoop.ozone.om.helpers.SnapshotInfo;
import org.apache.hadoop.ozone.om.snapshot.OmSnapshotUtils;
import org.apache.hadoop.util.Time;
import org.apache.ozone.test.GenericTestUtils;
-import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
+import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
+import org.junit.jupiter.api.io.TempDir;
import java.io.File;
import java.io.IOException;
@@ -79,10 +81,10 @@ import static org.mockito.Mockito.when;
/**
* Unit test ozone snapshot manager.
*/
-public class TestOmSnapshotManager {
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+class TestOmSnapshotManager {
private OzoneManager om;
- private File testDir;
private static final String CANDIDATE_DIR_NAME = OM_DB_NAME +
SNAPSHOT_CANDIDATE_DIR;
private File leaderDir;
@@ -94,12 +96,10 @@ public class TestOmSnapshotManager {
private File s1File;
private File f1File;
- @BeforeEach
- public void init() throws Exception {
+ @BeforeAll
+ void init(@TempDir File tempDir) throws Exception {
OzoneConfiguration configuration = new OzoneConfiguration();
- testDir = GenericTestUtils.getRandomizedTestDir();
- configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS,
- testDir.toString());
+ configuration.set(HddsConfigKeys.OZONE_METADATA_DIRS, tempDir.toString());
// Enable filesystem snapshot feature for the test regardless of the
default
configuration.setBoolean(OMConfigKeys.OZONE_FILESYSTEM_SNAPSHOT_ENABLED_KEY,
true);
@@ -110,13 +110,11 @@ public class TestOmSnapshotManager {
OmTestManagers omTestManagers = new OmTestManagers(configuration);
om = omTestManagers.getOzoneManager();
- setupData();
}
- @AfterEach
- public void cleanup() throws Exception {
+ @AfterAll
+ void stop() {
om.stop();
- FileUtils.deleteDirectory(testDir);
}
@Test
@@ -206,7 +204,8 @@ public class TestOmSnapshotManager {
verify(firstSnapshotStore, timeout(3000).times(1)).close();
}
- private void setupData() throws IOException {
+ @BeforeEach
+ void setupData(@TempDir File testDir) throws IOException {
// Set up the leader with the following files:
// leader/db.checkpoints/checkpoint1/f1.sst
// leader/db.snapshots/checkpointState/snap1/s1.sst
@@ -230,8 +229,7 @@ public class TestOmSnapshotManager {
byte[] dummyData = {0};
// Create dummy leader files to calculate links.
- leaderDir = new File(testDir.toString(),
- "leader");
+ leaderDir = new File(testDir, "leader");
assertTrue(leaderDir.mkdirs());
String pathSnap1 = OM_SNAPSHOT_CHECKPOINT_DIR + OM_KEY_PREFIX + "snap1";
String pathSnap2 = OM_SNAPSHOT_CHECKPOINT_DIR + OM_KEY_PREFIX + "snap2";
@@ -245,8 +243,7 @@ public class TestOmSnapshotManager {
Files.write(Paths.get(leaderSnapDir2.toString(), "nonSstFile"), dummyData);
// Also create the follower files.
- candidateDir = new File(testDir.toString(),
- CANDIDATE_DIR_NAME);
+ candidateDir = new File(testDir, CANDIDATE_DIR_NAME);
File followerSnapDir1 = new File(candidateDir.toString(), pathSnap1);
followerSnapDir2 = new File(candidateDir.toString(), pathSnap2);
copyDirectory(leaderDir.toPath(), candidateDir.toPath());
@@ -359,9 +356,9 @@ public class TestOmSnapshotManager {
* This test always passes in a null dest dir.
*/
@Test
- public void testProcessFileWithNullDestDirParameter() throws IOException {
- assertTrue(new File(testDir.toString(), "snap1").mkdirs());
- assertTrue(new File(testDir.toString(), "snap2").mkdirs());
+ void testProcessFileWithNullDestDirParameter(@TempDir File testDir) throws IOException {
+ assertTrue(new File(testDir, "snap1").mkdirs());
+ assertTrue(new File(testDir, "snap2").mkdirs());
Path copyFile = Paths.get(testDir.toString(),
"snap1/copyfile.sst");
Files.write(copyFile,
@@ -450,10 +447,10 @@ public class TestOmSnapshotManager {
* This test always passes in a non-null dest dir.
*/
@Test
- public void testProcessFileWithDestDirParameter() throws IOException {
- assertTrue(new File(testDir.toString(), "snap1").mkdirs());
- assertTrue(new File(testDir.toString(), "snap2").mkdirs());
- assertTrue(new File(testDir.toString(), "snap3").mkdirs());
+ void testProcessFileWithDestDirParameter(@TempDir File testDir) throws IOException {
+ assertTrue(new File(testDir, "snap1").mkdirs());
+ assertTrue(new File(testDir, "snap2").mkdirs());
+ assertTrue(new File(testDir, "snap3").mkdirs());
Path destDir = Paths.get(testDir.toString(), "destDir");
assertTrue(new File(destDir.toString()).mkdirs());
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java
index edc228a5d3..a8b026af05 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestSstFilteringService.java
@@ -36,7 +36,6 @@ import
org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.hadoop.ozone.om.snapshot.ReferenceCounted;
import org.apache.hadoop.ozone.om.snapshot.SnapshotCache;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.ratis.util.ExitUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
@@ -87,7 +86,7 @@ public class TestSstFilteringService {
}
@BeforeEach
- public void init() throws AuthenticationException, IOException {
+ void init() throws Exception {
conf = new OzoneConfiguration();
conf.set(OZONE_METADATA_DIRS, folder.getAbsolutePath());
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
index 9cdd965068..b7c8395608 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestTrashService.java
@@ -32,7 +32,6 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.ratis.util.ExitUtils;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
@@ -66,7 +65,7 @@ public class TestTrashService {
private String bucketName;
@BeforeEach
- public void setup() throws IOException, AuthenticationException {
+ void setup() throws Exception {
ExitUtils.disableSystemExit();
OzoneConfiguration configuration = new OzoneConfiguration();
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java
index 272b7b72db..762d874056 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestMultipartUploadCleanupService.java
@@ -36,23 +36,23 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
-import org.apache.ozone.test.GenericTestUtils;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.ExpiredMultipartUploadsBucket;
import org.apache.ratis.util.ExitUtils;
-import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.CsvSource;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
import java.io.IOException;
+import java.io.UncheckedIOException;
import java.nio.file.Path;
import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
+import java.util.List;
import java.util.UUID;
import java.util.concurrent.TimeUnit;
@@ -60,31 +60,27 @@ import static
org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_CLEANUP_SERVICE_INTERVAL;
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_EXPIRE_THRESHOLD;
import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_MPU_PARTS_CLEANUP_LIMIT_PER_TASK;
+import static org.apache.ozone.test.GenericTestUtils.waitFor;
import static org.assertj.core.api.Assertions.assertThat;
-import static org.junit.jupiter.api.Assertions.assertFalse;
-import static org.junit.jupiter.api.Assertions.assertTrue;
/**
* Test Multipart Upload Cleanup Service.
*/
-public class TestMultipartUploadCleanupService {
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+@Timeout(300)
+class TestMultipartUploadCleanupService {
private OzoneManagerProtocol writeClient;
private OzoneManager om;
- private static final Logger LOG =
- LoggerFactory.getLogger(TestMultipartUploadCleanupService.class);
- private static final Duration SERVICE_INTERVAL = Duration.ofMillis(500);
- private static final Duration EXPIRE_THRESHOLD = Duration.ofMillis(1000);
+ private static final Duration SERVICE_INTERVAL = Duration.ofMillis(100);
+ private static final Duration EXPIRE_THRESHOLD = Duration.ofMillis(200);
private KeyManager keyManager;
private OMMetadataManager omMetadataManager;
@BeforeAll
- public static void setup() {
+ void setup(@TempDir Path tempDir) throws Exception {
ExitUtils.disableSystemExit();
- }
- @BeforeEach
- public void createConfAndInitValues(@TempDir Path tempDir) throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
System.setProperty(DBConfigFromFile.CONFIG_DIR, "/");
ServerUtils.setOzoneMetaDirPath(conf, tempDir.toString());
@@ -101,8 +97,8 @@ public class TestMultipartUploadCleanupService {
om = omTestManagers.getOzoneManager();
}
- @AfterEach
- public void cleanup() throws Exception {
+ @AfterAll
+ void cleanup() {
om.stop();
}
@@ -110,7 +106,6 @@ public class TestMultipartUploadCleanupService {
* Create a bunch incomplete/inflight multipart upload info. Then we start
* the MultipartUploadCleanupService. We make sure that all the multipart
* upload info is picked up and aborted by OzoneManager.
- * @throws Exception
*/
@ParameterizedTest
@CsvSource({
@@ -118,9 +113,7 @@ public class TestMultipartUploadCleanupService {
"0, 88",
"66, 77"
})
- @Timeout(300)
- public void checkIfCleanupServiceIsDeletingExpiredMultipartUpload(
- int numDEFKeys, int numFSOKeys) throws Exception {
+ void deletesExpiredUpload(int numDEFKeys, int numFSOKeys) throws Exception {
MultipartUploadCleanupService multipartUploadCleanupService =
(MultipartUploadCleanupService)
@@ -140,24 +133,27 @@ public class TestMultipartUploadCleanupService {
// wait for MPU info to expire
Thread.sleep(EXPIRE_THRESHOLD.toMillis());
- assertFalse(keyManager.getExpiredMultipartUploads(EXPIRE_THRESHOLD,
- 10000).isEmpty());
+ assertThat(getExpiredMultipartUploads()).isNotEmpty();
multipartUploadCleanupService.resume();
- GenericTestUtils.waitFor(() -> multipartUploadCleanupService
- .getRunCount() > oldRunCount,
- (int) SERVICE_INTERVAL.toMillis(),
- 5 * (int) SERVICE_INTERVAL.toMillis());
-
// wait for requests to complete
- Thread.sleep(10 * SERVICE_INTERVAL.toMillis());
+ waitFor(() -> getExpiredMultipartUploads().isEmpty(),
+ (int) SERVICE_INTERVAL.toMillis(),
+ 15 * (int) SERVICE_INTERVAL.toMillis());
+ assertThat(multipartUploadCleanupService.getRunCount())
+ .isGreaterThan(oldRunCount);
assertThat(multipartUploadCleanupService.getSubmittedMpuInfoCount())
.isGreaterThanOrEqualTo(oldMpuInfoCount + numDEFKeys + numFSOKeys);
- assertTrue(keyManager.getExpiredMultipartUploads(EXPIRE_THRESHOLD,
- 10000).isEmpty());
+ }
+ private List<ExpiredMultipartUploadsBucket> getExpiredMultipartUploads() {
+ try {
+ return keyManager.getExpiredMultipartUploads(EXPIRE_THRESHOLD, 10000);
+ } catch (IOException e) {
+ throw new UncheckedIOException(e);
+ }
}
private void createIncompleteMPUKeys(int mpuKeyCount,
@@ -202,10 +198,6 @@ public class TestMultipartUploadCleanupService {
/**
* Create inflight multipart upload that are not completed / aborted yet.
- * @param volumeName
- * @param bucketName
- * @param keyName
- * @throws IOException
*/
private void createIncompleteMPUKey(String volumeName, String bucketName,
String keyName, int numParts) throws IOException {
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
index 2f4016d0e9..418608e855 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
@@ -43,9 +43,9 @@ import
org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ratis.util.ExitUtils;
-import org.junit.jupiter.api.AfterEach;
+import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
-import org.junit.jupiter.api.BeforeEach;
+import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.api.io.TempDir;
import org.junit.jupiter.params.ParameterizedTest;
@@ -67,6 +67,7 @@ import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_OPEN_KEY_EXPIRE_T
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertEquals;
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
class TestOpenKeyCleanupService {
private OzoneManagerProtocol writeClient;
private OzoneManager om;
@@ -83,12 +84,9 @@ class TestOpenKeyCleanupService {
private OMMetadataManager omMetadataManager;
@BeforeAll
- public static void setup() {
+ void setup(@TempDir Path tempDir) throws Exception {
ExitUtils.disableSystemExit();
- }
- @BeforeEach
- public void createConfAndInitValues(@TempDir Path tempDir) throws Exception {
OzoneConfiguration conf = new OzoneConfiguration();
System.setProperty(DBConfigFromFile.CONFIG_DIR, "/");
ServerUtils.setOzoneMetaDirPath(conf, tempDir.toString());
@@ -105,8 +103,8 @@ class TestOpenKeyCleanupService {
om = omTestManagers.getOzoneManager();
}
- @AfterEach
- public void cleanup() throws Exception {
+ @AfterAll
+ void cleanup() {
if (om.stop()) {
om.join();
}
@@ -144,12 +142,11 @@ class TestOpenKeyCleanupService {
final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount();
final long oldrunCount = openKeyCleanupService.getRunCount();
LOG.info("oldkeyCount={}, oldrunCount={}", oldkeyCount, oldrunCount);
- assertEquals(0, oldkeyCount);
final OMMetrics metrics = om.getMetrics();
- assertEquals(0, metrics.getNumKeyHSyncs());
- assertEquals(0, metrics.getNumOpenKeysCleaned());
- assertEquals(0, metrics.getNumOpenKeysHSyncCleaned());
+ long numKeyHSyncs = metrics.getNumKeyHSyncs();
+ long numOpenKeysCleaned = metrics.getNumOpenKeysCleaned();
+ long numOpenKeysHSyncCleaned = metrics.getNumOpenKeysHSyncCleaned();
final int keyCount = numDEFKeys + numFSOKeys;
createOpenKeys(numDEFKeys, false, BucketLayout.DEFAULT);
createOpenKeys(numFSOKeys, hsync, BucketLayout.FILE_SYSTEM_OPTIMIZED);
@@ -164,7 +161,7 @@ class TestOpenKeyCleanupService {
openKeyCleanupService.resume();
GenericTestUtils.waitFor(
- () -> openKeyCleanupService.getSubmittedOpenKeyCount() >= keyCount,
+ () -> openKeyCleanupService.getSubmittedOpenKeyCount() >= oldkeyCount + keyCount,
SERVICE_INTERVAL, WAIT_TIME);
GenericTestUtils.waitFor(
() -> openKeyCleanupService.getRunCount() >= oldrunCount + 2,
@@ -174,13 +171,13 @@ class TestOpenKeyCleanupService {
waitForOpenKeyCleanup(hsync, BucketLayout.FILE_SYSTEM_OPTIMIZED);
if (hsync) {
- assertAtLeast(numDEFKeys, metrics.getNumOpenKeysCleaned());
- assertAtLeast(numFSOKeys, metrics.getNumOpenKeysHSyncCleaned());
- assertEquals(numFSOKeys, metrics.getNumKeyHSyncs());
+ assertAtLeast(numOpenKeysCleaned + numDEFKeys, metrics.getNumOpenKeysCleaned());
+ assertAtLeast(numOpenKeysHSyncCleaned + numFSOKeys, metrics.getNumOpenKeysHSyncCleaned());
+ assertEquals(numKeyHSyncs + numFSOKeys, metrics.getNumKeyHSyncs());
} else {
- assertAtLeast(keyCount, metrics.getNumOpenKeysCleaned());
- assertEquals(0, metrics.getNumOpenKeysHSyncCleaned());
- assertEquals(0, metrics.getNumKeyHSyncs());
+ assertAtLeast(numOpenKeysCleaned + keyCount, metrics.getNumOpenKeysCleaned());
+ assertEquals(numOpenKeysHSyncCleaned, metrics.getNumOpenKeysHSyncCleaned());
+ assertEquals(numKeyHSyncs, metrics.getNumKeyHSyncs());
}
}
@@ -211,12 +208,11 @@ class TestOpenKeyCleanupService {
final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount();
final long oldrunCount = openKeyCleanupService.getRunCount();
LOG.info("oldMpuKeyCount={}, oldMpuRunCount={}", oldkeyCount, oldrunCount);
- assertEquals(0, oldkeyCount);
final OMMetrics metrics = om.getMetrics();
- assertEquals(0, metrics.getNumKeyHSyncs());
- assertEquals(0, metrics.getNumOpenKeysCleaned());
- assertEquals(0, metrics.getNumOpenKeysHSyncCleaned());
+ long numKeyHSyncs = metrics.getNumKeyHSyncs();
+ long numOpenKeysCleaned = metrics.getNumOpenKeysCleaned();
+ long numOpenKeysHSyncCleaned = metrics.getNumOpenKeysHSyncCleaned();
createIncompleteMPUKeys(numDEFKeys, BucketLayout.DEFAULT, NUM_MPU_PARTS,
true);
createIncompleteMPUKeys(numFSOKeys, BucketLayout.FILE_SYSTEM_OPTIMIZED,
@@ -245,7 +241,9 @@ class TestOpenKeyCleanupService {
assertExpiredOpenKeys(true, false,
BucketLayout.FILE_SYSTEM_OPTIMIZED);
- assertEquals(0, metrics.getNumOpenKeysCleaned());
+ assertEquals(numKeyHSyncs, metrics.getNumKeyHSyncs());
+ assertEquals(numOpenKeysCleaned, metrics.getNumOpenKeysCleaned());
+ assertEquals(numOpenKeysHSyncCleaned, metrics.getNumOpenKeysHSyncCleaned());
}
/**
@@ -275,12 +273,9 @@ class TestOpenKeyCleanupService {
final long oldkeyCount = openKeyCleanupService.getSubmittedOpenKeyCount();
final long oldrunCount = openKeyCleanupService.getRunCount();
LOG.info("oldMpuKeyCount={}, oldMpuRunCount={}", oldkeyCount, oldrunCount);
- assertEquals(0, oldkeyCount);
final OMMetrics metrics = om.getMetrics();
- assertEquals(0, metrics.getNumKeyHSyncs());
- assertEquals(0, metrics.getNumOpenKeysCleaned());
- assertEquals(0, metrics.getNumOpenKeysHSyncCleaned());
+ long numOpenKeysCleaned = metrics.getNumOpenKeysCleaned();
final int keyCount = numDEFKeys + numFSOKeys;
final int partCount = NUM_MPU_PARTS * keyCount;
createIncompleteMPUKeys(numDEFKeys, BucketLayout.DEFAULT, NUM_MPU_PARTS,
@@ -300,7 +295,7 @@ class TestOpenKeyCleanupService {
openKeyCleanupService.resume();
GenericTestUtils.waitFor(
- () -> openKeyCleanupService.getSubmittedOpenKeyCount() >= partCount,
+ () -> openKeyCleanupService.getSubmittedOpenKeyCount() >= oldkeyCount + partCount,
SERVICE_INTERVAL, WAIT_TIME);
GenericTestUtils.waitFor(
() -> openKeyCleanupService.getRunCount() >= oldrunCount + 2,
@@ -309,7 +304,7 @@ class TestOpenKeyCleanupService {
// No expired MPU parts fetched
waitForOpenKeyCleanup(false, BucketLayout.DEFAULT);
waitForOpenKeyCleanup(false, BucketLayout.FILE_SYSTEM_OPTIMIZED);
- assertAtLeast(partCount, metrics.getNumOpenKeysCleaned());
+ assertAtLeast(numOpenKeysCleaned + partCount, metrics.getNumOpenKeysCleaned());
}
private static void assertAtLeast(long expectedMinimum, long actual) {
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java
index e9249b1ab7..c73752904e 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestParentAcl.java
@@ -44,7 +44,6 @@ import
org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer.ACLType;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.ozone.test.GenericTestUtils;
import org.apache.ozone.test.tag.Unhealthy;
import org.junit.jupiter.api.AfterAll;
@@ -95,7 +94,7 @@ public class TestParentAcl {
private static File testDir;
@BeforeAll
- public static void setup() throws IOException, AuthenticationException {
+ static void setup() throws Exception {
ozConfig = new OzoneConfiguration();
ozConfig.set(OZONE_ACL_AUTHORIZER_CLASS,
OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java
index 7a2d22b338..6670dc644d 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/security/acl/TestVolumeOwner.java
@@ -37,7 +37,6 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
import org.apache.hadoop.ozone.om.request.OMRequestTestUtils;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.ozone.test.GenericTestUtils;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
@@ -77,7 +76,7 @@ public class TestVolumeOwner {
private static File testDir;
@BeforeAll
- public static void setup() throws IOException, AuthenticationException {
+ static void setup() throws Exception {
ozoneConfig = new OzoneConfiguration();
ozoneConfig.set(OZONE_ACL_AUTHORIZER_CLASS,
OZONE_ACL_AUTHORIZER_CLASS_NATIVE);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]