This is an automated email from the ASF dual-hosted git repository.
rakeshr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new e07c66c HDDS-6425. OmMetadataManagerImpl#isBucketEmpty does not work
on FSO buckets. (#3227)
e07c66c is described below
commit e07c66ca278c0485b7505ca153d6f17c3553e896
Author: Jyotinder Singh <[email protected]>
AuthorDate: Fri Mar 25 07:32:45 2022 +0530
HDDS-6425. OmMetadataManagerImpl#isBucketEmpty does not work on FSO
buckets. (#3227)
---
.../fs/ozone/TestOzoneFileSystemMissingParent.java | 3 +
.../hadoop/fs/ozone/TestRootedOzoneFileSystem.java | 20 +++-
.../fs/ozone/TestRootedOzoneFileSystemWithFSO.java | 22 +++-
.../rooted/ITestRootedOzoneContractRootDir.java | 15 +++
.../hadoop/ozone/om/TestObjectStoreWithFSO.java | 81 ++++++++++++++
.../hadoop/ozone/om/OmMetadataManagerImpl.java | 119 +++++++++++++++++----
6 files changed, 234 insertions(+), 26 deletions(-)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
index 9a3ac17..f42c5a5 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileSystemMissingParent.java
@@ -129,5 +129,8 @@ public class TestOzoneFileSystemMissingParent {
LambdaTestUtils.intercept(OMException.class,
"Cannot create file : parent/file " + "as parent "
+ "directory doesn't exist", () -> stream.close());
+
+ // cleanup
+ fs.delete(renamedPath, true);
}
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
index 9ddd2d1..472d671 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystem.java
@@ -1095,6 +1095,10 @@ public class TestRootedOzoneFileSystem {
// Create test volume, bucket and key
fs.mkdirs(dirPath3);
// Delete volume recursively
+
+ // HDDS-6414: We can't delete a bucket that contains files or directories,
+ // so, clean up the bucket first.
+ Assert.assertTrue(fs.delete(dirPath3, true));
Assert.assertTrue(fs.delete(volumePath3, true));
// Verify the volume is deleted
Assert.assertFalse(volumeExist(volumeStr3));
@@ -1455,7 +1459,9 @@ public class TestRootedOzoneFileSystem {
assertTrue("Renamed failed: /dir/file1", getFs().exists(file1Destin));
FileStatus[] fStatus = getFs().listStatus(dirPath);
assertEquals("Renamed failed", 1, fStatus.length);
- getFs().delete(getBucketPath(), true);
+
+ // HDDS-6414: cleanup sub-paths
+ getFs().delete(dirPath, true);
}
@@ -1476,7 +1482,10 @@ public class TestRootedOzoneFileSystem {
assertTrue("Renamed failed", getFs().rename(file1Destin, abcRootPath));
assertTrue("Renamed filed: /a/b/c/file1", getFs().exists(new Path(
abcRootPath, "file1")));
- getFs().delete(getBucketPath(), true);
+
+ // HDDS-6414: cleanup sub-paths
+ getFs().delete(new Path(getBucketPath() + "/a/"), true);
+ getFs().delete(file1Destin, true);
}
/**
@@ -1512,7 +1521,9 @@ public class TestRootedOzoneFileSystem {
new Path(getBucketPath() + root + "/file2");
assertTrue("Rename failed",
getFs().exists(expectedFilePathAfterRename));
- getFs().delete(getBucketPath(), true);
+
+ // HDDS-6414: cleanup sub-paths
+ getFs().delete(destRootPath, true);
}
/**
@@ -1538,6 +1549,9 @@ public class TestRootedOzoneFileSystem {
} catch (IllegalArgumentException e) {
//expected
}
+
+ // cleanup
+ fs.delete(sourceRoot, true);
}
/**
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java
index f8842c7..cfcee3a 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestRootedOzoneFileSystemWithFSO.java
@@ -21,6 +21,7 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.contract.ContractTestUtils;
import org.junit.Assert;
import org.junit.BeforeClass;
+import org.junit.After;
import org.junit.Ignore;
import org.junit.Test;
import org.junit.runner.RunWith;
@@ -64,6 +65,22 @@ public class TestRootedOzoneFileSystemWithFSO
setIsBucketFSOptimized(true);
}
+ /**
+ * HDDS-6414: Implementation gap for recursive deletion in FSO buckets.
+ * Please remove this once HDDS-6414 is merged.
+ *
+ * @throws IOException IOException
+ */
+ @After
+ public void cleanup() throws IOException {
+ getFs().delete(new Path(getBucketPath(), "root"), true);
+ getFs().delete(new Path(getBucketPath(), "dir"), true);
+ getFs().delete(new Path(getBucketPath(), "dir1"), true);
+ getFs().delete(new Path(getBucketPath(), "dir2"), true);
+ getFs().delete(new Path(getBucketPath(), "sub_dir1"), true);
+ getFs().delete(new Path(getBucketPath(), "file1"), true);
+ }
+
@Override
@Test
@Ignore("HDDS-2939")
@@ -171,6 +188,9 @@ public class TestRootedOzoneFileSystemWithFSO
LOG.info("Rename op-> source:{} to destin:{}", sourceRoot, subDir1);
// rename should fail and return false
Assert.assertFalse(getFs().rename(sourceRoot, subDir1));
- }
+ // cleanup
+ getFs().delete(subDir1, true);
+ getFs().delete(dir1Path, true);
+ }
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRootDir.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRootDir.java
index 951a837..de342a5 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRootDir.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/contract/rooted/ITestRootedOzoneContractRootDir.java
@@ -26,6 +26,8 @@ import org.apache.hadoop.fs.contract.AbstractFSContract;
import org.junit.AfterClass;
import org.junit.BeforeClass;
+import org.junit.Ignore;
+import org.junit.Test;
/**
* Ozone contract test for ROOT directory operations.
@@ -58,4 +60,17 @@ public class ITestRootedOzoneContractRootDir extends
// OFS doesn't support creating files directly under root
}
+ @Override
+ @Test
+ @Ignore("HDDS-6414")
+ public void testRmEmptyRootDirNonRecursive() {
+ // Temporarily ignored test case, please add it back with HDDS-6414.
+ }
+
+ @Override
+ @Test
+ @Ignore("HDDS-6414")
+ public void testListEmptyRootDirectory() {
+ // Temporarily ignored test case, please add it back with HDDS-6414.
+ }
}
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
index 938a756..a0ba00a 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
@@ -213,6 +213,87 @@ public class TestObjectStoreWithFSO {
dirPathC.getObjectID(), true);
}
+ /**
+ * Tests bucket deletion behaviour. Buckets should not be allowed to be
+ * deleted if they contain files or directories under them.
+ *
+ * @throws Exception
+ */
+ @Test
+ public void testDeleteBucketWithKeys() throws Exception {
+ // Create temporary volume and bucket for this test.
+ OzoneBucket testBucket = TestDataUtil
+ .createVolumeAndBucket(cluster, BucketLayout.FILE_SYSTEM_OPTIMIZED);
+ String testVolumeName = testBucket.getVolumeName();
+ String testBucketName = testBucket.getName();
+
+ String parent = "a/b/c/";
+ String file = "key" + RandomStringUtils.randomNumeric(5);
+ String key = parent + file;
+
+ OzoneClient client = cluster.getClient();
+
+ ObjectStore objectStore = client.getObjectStore();
+ OzoneVolume ozoneVolume = objectStore.getVolume(testVolumeName);
+ assertEquals(ozoneVolume.getName(), testVolumeName);
+ OzoneBucket ozoneBucket = ozoneVolume.getBucket(testBucketName);
+ assertEquals(ozoneBucket.getName(), testBucketName);
+
+ Table<String, OmKeyInfo> openFileTable =
+ cluster.getOzoneManager().getMetadataManager()
+ .getOpenKeyTable(getBucketLayout());
+
+ // before file creation
+ verifyKeyInFileTable(openFileTable, file, 0, true);
+
+ // Create a key.
+ ozoneBucket.createKey(key, 10).close();
+ Assert.assertFalse(cluster.getOzoneManager().getMetadataManager()
+ .isBucketEmpty(testVolumeName, testBucketName));
+
+ try {
+ // Try to delete the bucket while a key is present under it.
+ ozoneVolume.deleteBucket(testBucketName);
+ fail("Bucket Deletion should fail, since bucket is not empty.");
+ } catch (IOException ioe) {
+ // Do nothing
+ }
+
+ // Delete the key (this only deletes the file)
+ ozoneBucket.deleteKey(key);
+ Assert.assertFalse(cluster.getOzoneManager().getMetadataManager()
+ .isBucketEmpty(testVolumeName, testBucketName));
+ try {
+ // Try to delete the bucket while intermediate dirs are present under it.
+ ozoneVolume.deleteBucket(testBucketName);
+ fail("Bucket Deletion should fail, since bucket still contains " +
+ "intermediate directories");
+ } catch (IOException ioe) {
+ // Do nothing
+ }
+
+ // Delete last level of directories.
+ ozoneBucket.deleteDirectory(parent, true);
+ Assert.assertFalse(cluster.getOzoneManager().getMetadataManager()
+ .isBucketEmpty(testVolumeName, testBucketName));
+ try {
+ // Try to delete the bucket while dirs are present under it.
+ ozoneVolume.deleteBucket(testBucketName);
+ fail("Bucket Deletion should fail, since bucket still contains " +
+ "intermediate directories");
+ } catch (IOException ioe) {
+ // Do nothing
+ }
+
+ // Delete all the intermediate directories
+ ozoneBucket.deleteDirectory("a/", true);
+ Assert.assertTrue(cluster.getOzoneManager().getMetadataManager()
+ .isBucketEmpty(testVolumeName, testBucketName));
+ ozoneVolume.deleteBucket(testBucketName);
+ // Cleanup the Volume.
+ cluster.getClient().getObjectStore().deleteVolume(testVolumeName);
+ }
+
@Test
public void testLookupKey() throws Exception {
String parent = "a/b/c/";
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index a924186..7395add 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -707,46 +707,121 @@ public class OmMetadataManagerImpl implements
OMMetadataManager {
@Override
public boolean isBucketEmpty(String volume, String bucket)
throws IOException {
- String keyPrefix = getBucketKey(volume, bucket);
+ String bucketKey = getBucketKey(volume, bucket);
+ OmBucketInfo omBucketInfo = getBucketTable().get(bucketKey);
+ String bucketId = String.valueOf(omBucketInfo.getObjectID());
+ BucketLayout bucketLayout = omBucketInfo.getBucketLayout();
+
+ // keyPrefix is different in case of fileTable and keyTable.
+ // 1. For OBS and LEGACY buckets:
+ // the entries are present in the keyTable and are of the
+ // format <bucketKey>/<key-name>
+ // 2. For FSO buckets:
+ // - TOP-LEVEL DIRECTORY would be of the format <bucket ID>/dirName
+ // inside the dirTable.
+ // - TOP-LEVEL FILE (a file directly placed under the bucket without
+ // any sub paths) would be of the format <bucket ID>/fileName inside
+ // the fileTable.
+ String keyPrefix =
+ bucketLayout.isFileSystemOptimized() ? bucketId : bucketKey;
+
+ // Check key/file Table
+ Table<String, OmKeyInfo> table = getKeyTable(bucketLayout);
+
+ // First check in table cache.
+ if (isKeyPresentInTableCache(keyPrefix, table)) {
+ return false;
+ }
+ // check in table
+ if (isKeyPresentInTable(keyPrefix, table)) {
+ return false; // we found at least one key with this vol/bucket
+ }
+
+ // Check dirTable as well in case of FSO bucket.
+ if (bucketLayout.isFileSystemOptimized()) {
+ // First check in dirTable cache
+ if (isKeyPresentInTableCache(keyPrefix, dirTable)) {
+ return false;
+ }
+ // Check in the table
+ return !isKeyPresentInTable(keyPrefix, dirTable);
+ }
+ return true;
+ }
+
- // First check in key table cache.
+ /**
+ * Checks if a key starting with a given keyPrefix exists in the table cache.
+ *
+ * @param keyPrefix - key prefix to be searched.
+ * @param table - table to be searched.
+ * @return true if the key is present in the cache.
+ */
+ private boolean isKeyPresentInTableCache(String keyPrefix,
+ Table table) {
Iterator<Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>>> iterator =
- ((TypedTable< String, OmKeyInfo>) keyTable).cacheIterator();
+ table.cacheIterator();
while (iterator.hasNext()) {
- Map.Entry< CacheKey<String>, CacheValue<OmKeyInfo>> entry =
+ Map.Entry<CacheKey<String>, CacheValue<OmKeyInfo>> entry =
iterator.next();
String key = entry.getKey().getCacheKey();
OmKeyInfo omKeyInfo = entry.getValue().getCacheValue();
// Making sure that entry is not for delete key request.
if (key.startsWith(keyPrefix) && omKeyInfo != null) {
- return false;
+ return true;
}
}
- try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>> keyIter =
- keyTable.iterator()) {
+ return false;
+ }
+
+ /**
+ * Checks whether a key starting with the given prefix is present in the table.
+ *
+ * @param keyPrefix - Prefix to check for
+ * @param table - Table to check in
+ * @return true if the key is present in the table
+ * @throws IOException
+ */
+ private boolean isKeyPresentInTable(String keyPrefix,
+ Table<String, OmKeyInfo> table)
+ throws IOException {
+ try (TableIterator<String, ? extends KeyValue<String, OmKeyInfo>>
+ keyIter = table.iterator()) {
KeyValue<String, OmKeyInfo> kv = keyIter.seek(keyPrefix);
- if (kv != null) {
+ // Iterate through all the entries in the table which start with
+ // the current bucket's prefix.
+ while (kv != null && kv.getKey().startsWith(keyPrefix)) {
// Check the entry in db is not marked for delete. This can happen
// while entry is marked for delete, but it is not flushed to DB.
CacheValue<OmKeyInfo> cacheValue =
- keyTable.getCacheValue(new CacheKey(kv.getKey()));
- if (cacheValue != null) {
- if (kv.getKey().startsWith(keyPrefix)
- && cacheValue.getCacheValue() != null) {
- return false; // we found at least one key with this vol/bucket
- // prefix.
- }
- } else {
- if (kv.getKey().startsWith(keyPrefix)) {
- return false; // we found at least one key with this vol/bucket
- // prefix.
- }
+ table.getCacheValue(new CacheKey(kv.getKey()));
+
+ // Case 1: We found an entry, but no cache entry.
+ if (cacheValue == null) {
+ // we found at least one key with this prefix.
+ return true;
+ }
+
+ // Case 2a:
+ // We found a cache entry and cache value is not null.
+ if (cacheValue.getCacheValue() != null) {
+ return true;
}
- }
+ // Case 2b:
+ // Cache entry is present but cache value is null, hence this key is
+ // marked for deletion.
+ // However, we still need to iterate through the rest of the prefix
+ // range to check for other keys with the same prefix that might still
+ // be present.
+ if (!keyIter.hasNext()) {
+ break;
+ }
+ kv = keyIter.next();
+ }
}
- return true;
+ return false;
}
/**
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]