This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new e2bf5998c1 HDDS-12345. Share cluster in filesystem tests (#7959)
e2bf5998c1 is described below
commit e2bf5998c1e109bf9dbe28e0ad56b69220086814
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Tue Feb 25 14:38:45 2025 +0100
HDDS-12345. Share cluster in filesystem tests (#7959)
---
.../java/org/apache/hadoop/ozone/om/OmConfig.java | 11 +++
.../org/apache/hadoop/ozone/om/TestOmConfig.java | 29 ++++++++
.../hadoop/fs/ozone/TestOzoneFSBucketLayout.java | 23 ++----
.../hadoop/fs/ozone/TestOzoneFSInputStream.java | 81 +++++++---------------
.../fs/ozone/TestOzoneFSWithObjectStoreCreate.java | 53 ++++++--------
.../fs/ozone/TestOzoneFileSystemMetrics.java | 75 +++++++-------------
.../fs/ozone/TestOzoneFileSystemMissingParent.java | 47 +++++--------
.../hadoop/hdds/scm/TestContainerOperations.java | 5 +-
.../java/org/apache/ozone/test/NonHATests.java | 40 +++++++++++
9 files changed, 176 insertions(+), 188 deletions(-)
diff --git
a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OmConfig.java
b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OmConfig.java
index 901a911478..9abbaafe5f 100644
--- a/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OmConfig.java
+++ b/hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OmConfig.java
@@ -78,6 +78,17 @@ public void validate() {
}
}
+ public OmConfig copy() {
+ OmConfig copy = new OmConfig();
+ copy.setFrom(this);
+ return copy;
+ }
+
+ public void setFrom(OmConfig other) {
+ fileSystemPathEnabled = other.fileSystemPathEnabled;
+ maxListSize = other.maxListSize;
+ }
+
/**
* String keys for tests and grep.
*/
diff --git
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/TestOmConfig.java
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/TestOmConfig.java
index 30fa3ff17c..29343ea22a 100644
---
a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/TestOmConfig.java
+++
b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/TestOmConfig.java
@@ -18,6 +18,7 @@
package org.apache.hadoop.ozone.om;
import static org.assertj.core.api.Assertions.assertThat;
+import static org.junit.jupiter.api.Assertions.assertEquals;
import org.apache.hadoop.hdds.conf.MutableConfigurationSource;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -51,4 +52,32 @@ void overridesInvalidListSize(long invalidValue) {
.isEqualTo(OmConfig.Defaults.SERVER_LIST_MAX_SIZE);
}
+ @Test
+ void testCopy() {
+ MutableConfigurationSource conf = new OzoneConfiguration();
+ OmConfig original = conf.getObject(OmConfig.class);
+
+ OmConfig subject = original.copy();
+
+ assertConfigEquals(original, subject);
+ }
+
+ @Test
+ void testSetFrom() {
+ MutableConfigurationSource conf = new OzoneConfiguration();
+ OmConfig subject = conf.getObject(OmConfig.class);
+ OmConfig updated = conf.getObject(OmConfig.class);
+ updated.setFileSystemPathEnabled(!updated.isFileSystemPathEnabled());
+ updated.setMaxListSize(updated.getMaxListSize() + 1);
+
+ subject.setFrom(updated);
+
+ assertConfigEquals(updated, subject);
+ }
+
+ private static void assertConfigEquals(OmConfig expected, OmConfig actual) {
+ assertEquals(expected.getMaxListSize(), actual.getMaxListSize());
+ assertEquals(expected.isFileSystemPathEnabled(),
actual.isFileSystemPathEnabled());
+ }
+
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java
index 57fdc3c35b..62428079e8 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSBucketLayout.java
@@ -34,7 +34,6 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.scm.OzoneClientConfig;
import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
@@ -43,6 +42,7 @@
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.TestInstance;
@@ -52,12 +52,10 @@
/**
* Ozone file system tests to validate default bucket layout configuration
* and behaviour.
- * TODO: merge with some other test
*/
@TestInstance(TestInstance.Lifecycle.PER_CLASS)
-class TestOzoneFSBucketLayout {
+public abstract class TestOzoneFSBucketLayout implements NonHATests.TestCase {
- private MiniOzoneCluster cluster;
private ObjectStore objectStore;
private OzoneClient client;
private String rootPath;
@@ -95,25 +93,17 @@ static Collection<String> invalidDefaultBucketLayouts() {
}
@BeforeAll
- void initCluster() throws Exception {
- OzoneConfiguration conf = new OzoneConfiguration();
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(3)
- .build();
- cluster.waitForClusterToBeReady();
- client = cluster.newClient();
+ void setUp() throws Exception {
+ client = cluster().newClient();
objectStore = client.getObjectStore();
rootPath = String.format("%s://%s/",
- OzoneConsts.OZONE_OFS_URI_SCHEME, conf.get(OZONE_OM_ADDRESS_KEY));
+ OzoneConsts.OZONE_OFS_URI_SCHEME,
cluster().getConf().get(OZONE_OM_ADDRESS_KEY));
volumeName = TestDataUtil.createVolumeAndBucket(client).getVolumeName();
}
@AfterAll
- void teardown() throws IOException {
+ void tearDown() {
IOUtils.closeQuietly(client);
- if (cluster != null) {
- cluster.shutdown();
- }
}
@ParameterizedTest
@@ -134,6 +124,7 @@ void fileSystemWithUnsupportedDefaultBucketLayout(String layout) {
assertThat(e.getMessage())
.contains(ERROR_MAP.get(layout));
}
+
@ParameterizedTest
@MethodSource("validDefaultBucketLayouts")
void fileSystemWithValidBucketLayout(String layout) throws IOException {
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
index 8e7d78e22f..fd977cef3d 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
@@ -18,8 +18,10 @@
package org.apache.hadoop.fs.ozone;
import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
+import static org.apache.hadoop.hdds.utils.IOUtils.closeQuietly;
import static org.junit.jupiter.api.Assertions.assertArrayEquals;
import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertThrows;
import java.io.BufferedInputStream;
@@ -28,34 +30,30 @@
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
+import java.net.URI;
import java.nio.ByteBuffer;
import java.util.UUID;
import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdds.client.DefaultReplicationConfig;
import org.apache.hadoop.hdds.client.ECReplicationConfig;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.ozone.ClientConfigForTesting;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.BucketArgs;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.assertj.core.api.Assertions;
+import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
import org.junit.jupiter.params.ParameterizedTest;
import org.junit.jupiter.params.provider.ValueSource;
@@ -63,42 +61,19 @@
/**
* Test OzoneFSInputStream by reading through multiple interfaces.
*/
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@Timeout(300)
-public class TestOzoneFSInputStream {
-
- private static MiniOzoneCluster cluster = null;
- private static OzoneClient client;
- private static FileSystem fs;
- private static FileSystem ecFs;
- private static Path filePath = null;
- private static byte[] data = null;
- private static OzoneConfiguration conf = null;
-
- /**
- * Create a MiniDFSCluster for testing.
- * <p>
- * Ozone is made active by setting OZONE_ENABLED = true
- *
- * @throws IOException
- */
+public abstract class TestOzoneFSInputStream implements NonHATests.TestCase {
+
+ private OzoneClient client;
+ private FileSystem fs;
+ private FileSystem ecFs;
+ private Path filePath = null;
+ private byte[] data = null;
+
@BeforeAll
- public static void init() throws Exception {
- conf = new OzoneConfiguration();
- conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
- BucketLayout.LEGACY.name());
-
- ClientConfigForTesting.newBuilder(StorageUnit.MB)
- .setChunkSize(2)
- .setBlockSize(8)
- .setStreamBufferFlushSize(2)
- .setStreamBufferMaxSize(4)
- .applyTo(conf);
-
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(5)
- .build();
- cluster.waitForClusterToBeReady();
- client = cluster.newClient();
+ void init() throws Exception {
+ client = cluster().newClient();
// create a volume and a bucket to be used by OzoneFileSystem
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
@@ -106,8 +81,7 @@ public static void init() throws Exception {
// Set the fs.defaultFS and start the filesystem
String uri = String.format("%s://%s.%s/",
OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
bucket.getVolumeName());
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
- fs = FileSystem.get(conf);
+ fs = FileSystem.get(URI.create(uri), cluster().getConf());
int fileLen = 30 * 1024 * 1024;
data = string2Bytes(RandomStringUtils.randomAlphanumeric(fileLen));
filePath = new Path("/" + RandomStringUtils.randomAlphanumeric(5));
@@ -129,19 +103,12 @@ public static void init() throws Exception {
ecBucket);
String ecUri = String.format("%s://%s.%s/",
OzoneConsts.OZONE_URI_SCHEME, ecBucket, bucket.getVolumeName());
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, ecUri);
- ecFs = FileSystem.get(conf);
+ ecFs = FileSystem.get(URI.create(ecUri), cluster().getConf());
}
- /**
- * Shutdown MiniDFSCluster.
- */
@AfterAll
- public static void shutdown() throws IOException {
- IOUtils.cleanupWithLogger(null, client);
- fs.close();
- ecFs.close();
- cluster.shutdown();
+ void shutdown() {
+ closeQuietly(client, fs, ecFs);
}
@Test
@@ -240,7 +207,7 @@ public void testByteBufferPositionedReadFully() throws IOException {
// File position should not be changed after positional readFully
assertEquals(currentPos, inputStream.getPos());
// Make sure buffer is full after readFully
- Assertions.assertThat((!buffer.hasRemaining()));
+ assertFalse(buffer.hasRemaining());
byte[] value1 = new byte[bufferCapacity];
System.arraycopy(buffer.array(), 0, value1, 0, bufferCapacity);
@@ -254,7 +221,7 @@ public void testByteBufferPositionedReadFully() throws IOException {
position = 8;
inputStream.readFully(position, buffer);
assertEquals(currentPos, inputStream.getPos());
- Assertions.assertThat((!buffer.hasRemaining()));
+ assertFalse(buffer.hasRemaining());
byte[] value3 = new byte[bufferCapacity];
System.arraycopy(buffer.array(), 0, value3, 0, bufferCapacity);
byte[] value4 = new byte[bufferCapacity];
@@ -328,7 +295,7 @@ public void testSequenceFileReaderSync() throws IOException {
input.close();
// Start SequenceFile.Reader test
- SequenceFile.Reader in = new SequenceFile.Reader(fs, path, conf);
+ SequenceFile.Reader in = new SequenceFile.Reader(fs, path,
cluster().getConf());
long blockStart = -1;
// EOFException should not occur.
in.sync(0);
@@ -350,7 +317,7 @@ public void testSequenceFileReaderSyncEC() throws IOException {
input.close();
// Start SequenceFile.Reader test
- SequenceFile.Reader in = new SequenceFile.Reader(ecFs, path, conf);
+ SequenceFile.Reader in = new SequenceFile.Reader(ecFs, path,
cluster().getConf());
long blockStart = -1;
// EOFException should not occur.
in.sync(0);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
index a9fba2836a..8f1516c2e2 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSWithObjectStoreCreate.java
@@ -29,6 +29,7 @@
import static org.junit.jupiter.api.Assertions.assertTrue;
import java.io.FileNotFoundException;
+import java.io.IOException;
import java.net.URI;
import java.security.MessageDigest;
import java.util.ArrayList;
@@ -45,9 +46,7 @@
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OmUtils;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
@@ -56,54 +55,46 @@
import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.client.io.OzoneInputStream;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OmConfig;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
+import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
/**
* Class tests create with object store and getFileStatus.
*/
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@Timeout(300)
-public class TestOzoneFSWithObjectStoreCreate {
-
- private String rootPath;
-
- private static MiniOzoneCluster cluster = null;
- private static OzoneClient client;
+public abstract class TestOzoneFSWithObjectStoreCreate implements
NonHATests.TestCase {
+ private OzoneClient client;
private OzoneFileSystem o3fs;
-
private String volumeName;
-
private String bucketName;
+ private OmConfig originalOmConfig;
@BeforeAll
- public static void initClass() throws Exception {
- OzoneConfiguration conf = new OzoneConfiguration();
-
- conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
- conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
- BucketLayout.LEGACY.name());
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(3)
- .build();
- cluster.waitForClusterToBeReady();
- client = cluster.newClient();
+ void initClass() throws IOException {
+ client = cluster().newClient();
+
+ OmConfig omConfig = cluster().getOzoneManager().getConfig();
+ originalOmConfig = omConfig.copy();
+ omConfig.setFileSystemPathEnabled(true);
+
}
@AfterAll
- public static void teardownClass() {
+ void tearDownClass() {
IOUtils.closeQuietly(client);
- if (cluster != null) {
- cluster.shutdown();
- }
+ cluster().getOzoneManager().getConfig().setFrom(originalOmConfig);
}
@BeforeEach
@@ -111,18 +102,16 @@ public void init() throws Exception {
volumeName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
bucketName = RandomStringUtils.randomAlphabetic(10).toLowerCase();
- OzoneConfiguration conf = cluster.getConf();
-
// create a volume and a bucket to be used by OzoneFileSystem
- TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName);
+ TestDataUtil.createVolumeAndBucket(client, volumeName, bucketName,
BucketLayout.LEGACY);
- rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME, bucketName,
+ String rootPath = String.format("%s://%s.%s/", OZONE_URI_SCHEME,
bucketName,
volumeName);
- o3fs = (OzoneFileSystem) FileSystem.get(new URI(rootPath), conf);
+ o3fs = (OzoneFileSystem) FileSystem.get(new URI(rootPath),
cluster().getConf());
}
@AfterEach
- public void teardown() {
+ void tearDown() {
IOUtils.closeQuietly(o3fs);
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java
index 5655cd2e53..0c456e6a8b 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMetrics.java
@@ -22,88 +22,65 @@
import static org.junit.jupiter.api.Assertions.assertEquals;
import java.io.IOException;
+import java.net.URI;
import org.apache.commons.lang3.RandomStringUtils;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.ozone.ClientConfigForTesting;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OmConfig;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
import org.junit.jupiter.api.Timeout;
/**
* Test OM Metrics for OzoneFileSystem operations.
*/
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
@Timeout(300)
-public class TestOzoneFileSystemMetrics {
- private static MiniOzoneCluster cluster = null;
- private static OzoneClient client;
- private static FileSystem fs;
- private static OzoneBucket bucket;
+public abstract class TestOzoneFileSystemMetrics implements
NonHATests.TestCase {
+
+ private OzoneClient client;
+ private FileSystem fs;
+ private OzoneBucket bucket;
+ private OmConfig originalOmConfig;
enum TestOps {
File,
Directory,
Key
}
- /**
- * Create a MiniDFSCluster for testing.
- * <p>
- * Ozone is made active by setting OZONE_ENABLED = true
- *
- * @throws IOException
- */
+
@BeforeAll
- public static void init() throws Exception {
- OzoneConfiguration conf = new OzoneConfiguration();
- conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
- BucketLayout.LEGACY.name());
- conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
-
- ClientConfigForTesting.newBuilder(StorageUnit.MB)
- .setChunkSize(2)
- .setBlockSize(8)
- .setStreamBufferFlushSize(2)
- .setStreamBufferMaxSize(4)
- .applyTo(conf);
-
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(3)
- .build();
- cluster.waitForClusterToBeReady();
- client = cluster.newClient();
+ void init() throws Exception {
+ client = cluster().newClient();
+
+ OmConfig omConfig = cluster().getOzoneManager().getConfig();
+ originalOmConfig = omConfig.copy();
+ omConfig.setFileSystemPathEnabled(true);
// create a volume and a bucket to be used by OzoneFileSystem
- bucket = TestDataUtil.createVolumeAndBucket(client);
+ bucket = TestDataUtil.createVolumeAndBucket(client, BucketLayout.LEGACY);
// Set the fs.defaultFS and start the filesystem
String uri = String.format("%s://%s.%s/",
OzoneConsts.OZONE_URI_SCHEME, bucket.getName(),
bucket.getVolumeName());
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
- fs = FileSystem.get(conf);
+ fs = FileSystem.get(URI.create(uri), cluster().getConf());
}
- /**
- * Shutdown MiniDFSCluster.
- */
@AfterAll
- public static void shutdown() throws IOException {
- IOUtils.closeQuietly(client);
- fs.close();
- cluster.shutdown();
+ void cleanup() {
+ IOUtils.closeQuietly(client, fs);
+ cluster().getOzoneManager().getConfig().setFrom(originalOmConfig);
}
@Test
@@ -122,7 +99,7 @@ public void testDirOps() throws Exception {
}
private void testOzoneFileCommit(TestOps op) throws Exception {
- long numKeysBeforeCreate = cluster
+ long numKeysBeforeCreate = cluster()
.getOzoneManager().getMetrics().getNumKeys();
int fileLen = 30 * 1024 * 1024;
@@ -151,13 +128,13 @@ private void testOzoneFileCommit(TestOps op) throws Exception {
throw new IOException("Execution should never reach here." + op);
}
- long numKeysAfterCommit = cluster
+ long numKeysAfterCommit = cluster()
.getOzoneManager().getMetrics().getNumKeys();
assertThat(numKeysAfterCommit).isGreaterThan(0);
assertEquals(numKeysBeforeCreate + 2, numKeysAfterCommit);
fs.delete(parentDir, true);
- long numKeysAfterDelete = cluster
+ long numKeysAfterDelete = cluster()
.getOzoneManager().getMetrics().getNumKeys();
assertThat(numKeysAfterDelete).isGreaterThanOrEqualTo(0);
assertEquals(numKeysBeforeCreate, numKeysAfterDelete);
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
index 493efd0f20..fc79197202 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
@@ -22,46 +22,37 @@
import static org.assertj.core.api.Assertions.assertThat;
import static org.junit.jupiter.api.Assertions.assertThrows;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import java.net.URI;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.ozone.test.NonHATests;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;
+import org.junit.jupiter.api.TestInstance;
/**
* Tests OFS behavior when filesystem paths are enabled and parent directory is
* missing for some reason.
*/
-public class TestOzoneFileSystemMissingParent {
+@TestInstance(TestInstance.Lifecycle.PER_CLASS)
+public abstract class TestOzoneFileSystemMissingParent implements
NonHATests.TestCase {
- private static OzoneConfiguration conf;
- private static MiniOzoneCluster cluster;
- private static Path bucketPath;
- private static FileSystem fs;
- private static OzoneClient client;
+ private Path bucketPath;
+ private FileSystem fs;
+ private OzoneClient client;
@BeforeAll
- public static void init() throws Exception {
- conf = new OzoneConfiguration();
- conf.setBoolean(OMConfigKeys.OZONE_OM_ENABLE_FILESYSTEM_PATHS, true);
- conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
- OMConfigKeys.OZONE_BUCKET_LAYOUT_FILE_SYSTEM_OPTIMIZED);
-
- cluster = MiniOzoneCluster.newBuilder(conf).setNumDatanodes(3).build();
- cluster.waitForClusterToBeReady();
- client = cluster.newClient();
+ void init() throws Exception {
+ client = cluster().newClient();
OzoneBucket bucket = TestDataUtil.createVolumeAndBucket(client);
@@ -70,13 +61,11 @@ public static void init() throws Exception {
String bucketName = bucket.getName();
bucketPath = new Path(volumePath, bucketName);
- String rootPath = String
- .format("%s://%s/", OzoneConsts.OZONE_OFS_URI_SCHEME,
- conf.get(OZONE_OM_ADDRESS_KEY));
+ String rootPath = String.format("%s://%s/",
+ OzoneConsts.OZONE_OFS_URI_SCHEME,
+ cluster().getConf().get(OZONE_OM_ADDRESS_KEY));
- // Set the fs.defaultFS and create filesystem.
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
- fs = FileSystem.get(conf);
+ fs = FileSystem.get(URI.create(rootPath), cluster().getConf());
}
@AfterEach
@@ -85,12 +74,8 @@ public void cleanUp() throws Exception {
}
@AfterAll
- public static void tearDown() {
- IOUtils.closeQuietly(client);
- if (cluster != null) {
- cluster.shutdown();
- cluster = null;
- }
+ void tearDown() {
+ IOUtils.closeQuietly(client, fs);
}
/**
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java
index 68235657a1..b98aee7286 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/scm/TestContainerOperations.java
@@ -220,7 +220,8 @@ public void testNodeOperationalStates() throws Exception {
final int numOfDatanodes = nm.getAllNodes().size();
// Set one node to be something other than IN_SERVICE
- DatanodeDetails node = nm.getAllNodes().get(0);
+ final DatanodeDetails node = nm.getAllNodes().get(0);
+ HddsProtos.NodeOperationalState originalState =
nm.getNodeStatus(node).getOperationalState();
nm.setNodeOperationalState(node, DECOMMISSIONING);
// Nodes not in DECOMMISSIONING state should be returned as they are in service
@@ -250,8 +251,6 @@ public void testNodeOperationalStates() throws Exception {
// Test all operational states by looping over them all and setting the
// state manually.
- node = nm.getAllNodes().get(0);
- HddsProtos.NodeOperationalState originalState =
nm.getNodeStatus(node).getOperationalState();
try {
for (HddsProtos.NodeOperationalState s :
HddsProtos.NodeOperationalState.values()) {
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java
index b42c569257..97c0ca8d85 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/ozone/test/NonHATests.java
@@ -37,6 +37,46 @@ public interface TestCase {
MiniOzoneCluster cluster();
}
+ @Nested
+ class OzoneFSBucketLayout extends
org.apache.hadoop.fs.ozone.TestOzoneFSBucketLayout {
+ @Override
+ public MiniOzoneCluster cluster() {
+ return getCluster();
+ }
+ }
+
+ @Nested
+ class OzoneFSInputStream extends
org.apache.hadoop.fs.ozone.TestOzoneFSInputStream {
+ @Override
+ public MiniOzoneCluster cluster() {
+ return getCluster();
+ }
+ }
+
+ @Nested
+ class OzoneFSWithObjectStoreCreate extends
org.apache.hadoop.fs.ozone.TestOzoneFSWithObjectStoreCreate {
+ @Override
+ public MiniOzoneCluster cluster() {
+ return getCluster();
+ }
+ }
+
+ @Nested
+ class OzoneFileSystemMetrics extends
org.apache.hadoop.fs.ozone.TestOzoneFileSystemMetrics {
+ @Override
+ public MiniOzoneCluster cluster() {
+ return getCluster();
+ }
+ }
+
+ @Nested
+ class OzoneFileSystemMissingParent extends
org.apache.hadoop.fs.ozone.TestOzoneFileSystemMissingParent {
+ @Override
+ public MiniOzoneCluster cluster() {
+ return getCluster();
+ }
+ }
+
@Nested
class AllocateContainer extends
org.apache.hadoop.hdds.scm.TestAllocateContainer {
@Override
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]