This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 54eaf8239c HDDS-9488. Merge TestOzoneFileSystemWithLinks into
TestOzoneFileSystem (#5464)
54eaf8239c is described below
commit 54eaf8239cf615ebc8c95e0db7f1ca807c05b829
Author: Doroszlai, Attila <[email protected]>
AuthorDate: Tue Oct 31 00:13:17 2023 +0100
HDDS-9488. Merge TestOzoneFileSystemWithLinks into TestOzoneFileSystem
(#5464)
---
.../hadoop/fs/ozone/TestOzoneFileSystem.java | 79 +++++++
.../fs/ozone/TestOzoneFileSystemWithLinks.java | 254 ---------------------
2 files changed, 79 insertions(+), 254 deletions(-)
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
index 76a12204a6..9394df7b73 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystem.java
@@ -46,9 +46,12 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
import org.apache.hadoop.ozone.OzoneConfigKeys;
import org.apache.hadoop.ozone.OzoneConsts;
import org.apache.hadoop.ozone.TestDataUtil;
+import org.apache.hadoop.ozone.client.BucketArgs;
+import org.apache.hadoop.ozone.client.ObjectStore;
import org.apache.hadoop.ozone.client.OzoneBucket;
import org.apache.hadoop.ozone.client.OzoneClient;
import org.apache.hadoop.ozone.client.OzoneKeyDetails;
+import org.apache.hadoop.ozone.client.OzoneVolume;
import org.apache.hadoop.ozone.om.OMConfigKeys;
import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
import org.apache.hadoop.ozone.om.TrashPolicyOzone;
@@ -76,6 +79,7 @@ import org.slf4j.LoggerFactory;
import java.io.FileNotFoundException;
import java.io.IOException;
+import java.net.URI;
import java.nio.charset.StandardCharsets;
import java.time.Instant;
import java.time.ZoneOffset;
@@ -86,6 +90,7 @@ import java.util.Collections;
import java.util.Iterator;
import java.util.Set;
import java.util.TreeSet;
+import java.util.UUID;
import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
@@ -1727,4 +1732,78 @@ public class TestOzoneFileSystem {
// verify that mtime is NOT updated as expected.
Assert.assertEquals(mtime, fileStatus.getModificationTime());
}
+
+
+ @Test
+ public void testLoopInLinkBuckets() throws Exception {
+ String linksVolume = UUID.randomUUID().toString();
+
+ ObjectStore store = client.getObjectStore();
+
+ // Create volume
+ store.createVolume(linksVolume);
+ OzoneVolume volume = store.getVolume(linksVolume);
+
+ String linkBucket1Name = UUID.randomUUID().toString();
+ String linkBucket2Name = UUID.randomUUID().toString();
+ String linkBucket3Name = UUID.randomUUID().toString();
+
+ // case-1: Create a loop in the link buckets
+ createLinkBucket(volume, linkBucket1Name, linkBucket2Name);
+ createLinkBucket(volume, linkBucket2Name, linkBucket3Name);
+ createLinkBucket(volume, linkBucket3Name, linkBucket1Name);
+
+ String rootPath = String.format("%s://%s.%s/",
+ OzoneConsts.OZONE_URI_SCHEME, linkBucket1Name, linksVolume);
+
+ try {
+ FileSystem.get(URI.create(rootPath), cluster.getConf());
+ Assert.fail("Should throw Exception due to loop in Link Buckets");
+ } catch (OMException oe) {
+ // Expected exception
+      Assert.assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS,
+          oe.getResult());
+ } finally {
+ volume.deleteBucket(linkBucket1Name);
+ volume.deleteBucket(linkBucket2Name);
+ volume.deleteBucket(linkBucket3Name);
+ }
+
+ // case-2: Dangling link bucket
+ String danglingLinkBucketName = UUID.randomUUID().toString();
+ String sourceBucketName = UUID.randomUUID().toString();
+
+ // danglingLinkBucket is a dangling link over a source bucket that doesn't
+ // exist.
+ createLinkBucket(volume, sourceBucketName, danglingLinkBucketName);
+
+ String rootPath2 = String.format("%s://%s.%s/",
+ OzoneConsts.OZONE_URI_SCHEME, danglingLinkBucketName, linksVolume);
+
+ try {
+ FileSystem.get(URI.create(rootPath2), cluster.getConf());
+ } catch (OMException oe) {
+ // Expected exception
+ Assert.fail("Should not throw Exception and show orphan buckets");
+ } finally {
+ volume.deleteBucket(danglingLinkBucketName);
+ }
+ }
+
+ /**
+ * Helper method to create Link Buckets.
+ *
+ * @param sourceVolume Name of source volume for Link Bucket.
+ * @param sourceBucket Name of source bucket for Link Bucket.
+ * @param linkBucket Name of Link Bucket
+ * @throws IOException
+ */
+ private void createLinkBucket(OzoneVolume sourceVolume, String sourceBucket,
+ String linkBucket) throws IOException {
+ BucketArgs.Builder builder = BucketArgs.newBuilder();
+ builder.setBucketLayout(BucketLayout.DEFAULT)
+ .setSourceVolume(sourceVolume.getName())
+ .setSourceBucket(sourceBucket);
+ sourceVolume.createBucket(linkBucket, builder.build());
+ }
}
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithLinks.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithLinks.java
deleted file mode 100644
index 4d58c98a88..0000000000
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemWithLinks.java
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs.ozone;
-
-import org.apache.hadoop.hdds.conf.OzoneConfiguration;
-import org.apache.hadoop.hdds.utils.IOUtils;
-import org.apache.hadoop.ozone.MiniOzoneCluster;
-import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.TestDataUtil;
-import org.apache.hadoop.ozone.client.BucketArgs;
-import org.apache.hadoop.ozone.client.ObjectStore;
-import org.apache.hadoop.ozone.client.OzoneBucket;
-import org.apache.hadoop.ozone.client.OzoneClient;
-import org.apache.hadoop.ozone.client.OzoneVolume;
-import org.apache.hadoop.ozone.om.OMConfigKeys;
-import org.apache.hadoop.ozone.om.exceptions.OMException;
-import org.apache.hadoop.ozone.om.helpers.BucketLayout;
-import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
-import org.junit.rules.TestRule;
-import org.junit.rules.Timeout;
-import org.apache.ozone.test.JUnit5AwareTimeout;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import java.util.UUID;
-
-import java.io.IOException;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_CHECKPOINT_INTERVAL_KEY;
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_TRASH_INTERVAL_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ACL_ENABLED;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_FS_ITERATE_BATCH_SIZE;
-
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.Trash;
-
-import static org.junit.Assert.fail;
-
-import org.junit.After;
-import org.junit.AfterClass;
-import org.junit.Assert;
-import org.junit.Rule;
-import org.junit.Test;
-
-
-/**
- * Ozone file system tests for Link Buckets.
- */
-public class TestOzoneFileSystemWithLinks {
-
- private static final float TRASH_INTERVAL = 0.05f; // 3 seconds
-
- public TestOzoneFileSystemWithLinks() throws Exception {
- try {
- teardown();
- init();
- } catch (Exception e) {
- LOG.info("Unexpected exception", e);
- fail("Unexpected exception:" + e.getMessage());
- }
- }
-
-
- /**
- * Set a timeout for each test.
- */
- @Rule
- public TestRule timeout = new JUnit5AwareTimeout(Timeout.seconds(300));
-
- private static final Logger LOG =
- LoggerFactory.getLogger(TestOzoneFileSystem.class);
-
- private static BucketLayout bucketLayout = BucketLayout.DEFAULT;
-
- private static MiniOzoneCluster cluster;
- private static OzoneClient client;
- private static OzoneManagerProtocol writeClient;
- private static FileSystem fs;
- private static OzoneFileSystem o3fs;
- private static String volumeName;
- private static String bucketName;
- private static Trash trash;
- private OzoneConfiguration conf;
-
- private void init() throws Exception {
- conf = new OzoneConfiguration();
- conf.setFloat(OMConfigKeys.OZONE_FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL);
- conf.setFloat(FS_TRASH_INTERVAL_KEY, TRASH_INTERVAL);
- conf.setFloat(FS_TRASH_CHECKPOINT_INTERVAL_KEY, TRASH_INTERVAL / 2);
-
- conf.setBoolean(OZONE_ACL_ENABLED, true);
- conf.set(OMConfigKeys.OZONE_DEFAULT_BUCKET_LAYOUT,
- bucketLayout.name());
- cluster = MiniOzoneCluster.newBuilder(conf)
- .setNumDatanodes(3)
- .build();
- cluster.waitForClusterToBeReady();
-
- client = cluster.newClient();
- writeClient = client.getObjectStore()
- .getClientProxy().getOzoneManagerClient();
- // create a volume and a bucket to be used by OzoneFileSystem
- OzoneBucket bucket =
- TestDataUtil.createVolumeAndBucket(client, bucketLayout);
- volumeName = bucket.getVolumeName();
- bucketName = bucket.getName();
-
- String rootPath = String.format("%s://%s.%s/",
- OzoneConsts.OZONE_URI_SCHEME, bucketName, volumeName);
-
- // Set the fs.defaultFS and start the filesystem
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
- // Set the number of keys to be processed during batch operate.
- conf.setInt(OZONE_FS_ITERATE_BATCH_SIZE, 5);
-
- fs = FileSystem.get(conf);
- trash = new Trash(conf);
- o3fs = (OzoneFileSystem) fs;
- }
-
- @AfterClass
- public static void teardown() {
- IOUtils.closeQuietly(client);
- if (cluster != null) {
- cluster.shutdown();
- }
- IOUtils.closeQuietly(fs);
- }
-
- @After
- public void cleanup() {
- try {
- Path root = new Path("/");
- FileStatus[] fileStatuses = fs.listStatus(root);
- for (FileStatus fileStatus : fileStatuses) {
- fs.delete(fileStatus.getPath(), true);
- }
- } catch (IOException ex) {
- fail("Failed to cleanup files.");
- }
- }
-
- public static MiniOzoneCluster getCluster() {
- return cluster;
- }
-
- public static FileSystem getFs() {
- return fs;
- }
-
- public static void setBucketLayout(BucketLayout bLayout) {
- bucketLayout = bLayout;
- }
-
- public static String getBucketName() {
- return bucketName;
- }
-
- public static String getVolumeName() {
- return volumeName;
- }
-
- public BucketLayout getBucketLayout() {
- return BucketLayout.DEFAULT;
- }
-
- @Test
- public void testLoopInLinkBuckets() throws Exception {
- String linksVolume = UUID.randomUUID().toString();
-
- ObjectStore store = client.getObjectStore();
-
- // Create volume
- store.createVolume(linksVolume);
- OzoneVolume volume = store.getVolume(linksVolume);
-
- String linkBucket1Name = UUID.randomUUID().toString();
- String linkBucket2Name = UUID.randomUUID().toString();
- String linkBucket3Name = UUID.randomUUID().toString();
-
- // case-1: Create a loop in the link buckets
- createLinkBucket(volume, linkBucket1Name, linkBucket2Name);
- createLinkBucket(volume, linkBucket2Name, linkBucket3Name);
- createLinkBucket(volume, linkBucket3Name, linkBucket1Name);
-
- // Set the fs.defaultFS and start the filesystem
-
- String rootPath = String.format("%s://%s.%s/",
- OzoneConsts.OZONE_URI_SCHEME, linkBucket1Name, linksVolume);
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
-
- try {
- FileSystem.get(conf);
- Assert.fail("Should throw Exception due to loop in Link Buckets");
- } catch (OMException oe) {
- // Expected exception
-      Assert.assertEquals(OMException.ResultCodes.DETECTED_LOOP_IN_BUCKET_LINKS,
-          oe.getResult());
- oe.getResult());
- }
-
- // case-2: Dangling link bucket
- String danglingLinkBucketName = UUID.randomUUID().toString();
- String sourceBucketName = UUID.randomUUID().toString();
-
- // danglingLinkBucket is a dangling link over a source bucket that doesn't
- // exist.
- createLinkBucket(volume, sourceBucketName, danglingLinkBucketName);
-
- rootPath = String.format("%s://%s.%s/",
- OzoneConsts.OZONE_URI_SCHEME, danglingLinkBucketName, linksVolume);
- conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, rootPath);
-
- try {
- FileSystem.get(conf);
- } catch (OMException oe) {
- // Expected exception
- Assert.fail("Should not throw Exception and show orphan buckets");
- }
- }
-
- /**
- * Helper method to create Link Buckets.
- *
- * @param sourceVolume Name of source volume for Link Bucket.
- * @param sourceBucket Name of source bucket for Link Bucket.
- * @param linkBucket Name of Link Bucket
- * @throws IOException
- */
- private void createLinkBucket(OzoneVolume sourceVolume, String sourceBucket,
- String linkBucket) throws IOException {
- BucketArgs.Builder builder = BucketArgs.newBuilder();
- builder.setBucketLayout(BucketLayout.DEFAULT)
- .setSourceVolume(sourceVolume.getName())
- .setSourceBucket(sourceBucket);
- sourceVolume.createBucket(linkBucket, builder.build());
- }
-}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]