adoroszlai commented on code in PR #9084:
URL: https://github.com/apache/ozone/pull/9084#discussion_r2402479394
##########
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java:
##########
@@ -200,6 +220,120 @@ private OMRequest preExecute(OMRequest originalOmRequest)
       throws IOException {
     return modifiedOmRequest;
   }
+  private PurgePathRequest createBucketDataAndGetPurgePathRequest(OmBucketInfo bucketInfo) throws Exception {
+    OmDirectoryInfo dir1 = new OmDirectoryInfo.Builder()
+        .setName("dir1")
+        .setCreationTime(Time.now())
+        .setModificationTime(Time.now())
+        .setObjectID(1)
+        .setParentObjectID(bucketInfo.getObjectID())
+        .setUpdateID(0)
+        .build();
+    String dirKey = OMRequestTestUtils.addDirKeyToDirTable(false, dir1, volumeName,
+        bucketInfo.getBucketName(), 1L, omMetadataManager);
+    List<OmKeyInfo> subFiles = new ArrayList<>();
+    List<OmKeyInfo> subDirs = new ArrayList<>();
+    List<String> subFileKeys = new ArrayList<>();
+    List<String> subDirKeys = new ArrayList<>();
+    for (int id = 1; id < 10; id++) {
+      OmDirectoryInfo subdir = new OmDirectoryInfo.Builder()
+          .setName("subdir" + id)
+          .setCreationTime(Time.now())
+          .setModificationTime(Time.now())
+          .setObjectID(2 * id)
+          .setParentObjectID(dir1.getObjectID())
+          .setUpdateID(0)
+          .build();
+      String subDirectoryPath = OMRequestTestUtils.addDirKeyToDirTable(false, subdir, volumeName,
+          bucketInfo.getBucketName(), 2 * id, omMetadataManager);
+      subDirKeys.add(subDirectoryPath);
+      OmKeyInfo subFile =
+          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketInfo.getBucketName(), "file" + id,
+              RatisReplicationConfig.getInstance(ONE))
+          .setObjectID(2 * id + 1)
+          .setParentObjectID(dir1.getObjectID())
+          .setUpdateID(100L)
+          .build();
+      String subFilePath = OMRequestTestUtils.addFileToKeyTable(false, true, subFile.getKeyName(),
+          subFile, 1234L, 2 * id + 1, omMetadataManager);
+      subFileKeys.add(subFilePath);
+      subFile.setKeyName("dir1/" + subFile.getKeyName());
+      subFiles.add(subFile);
+      subDirs.add(getOmKeyInfo(volumeName, bucketInfo.getBucketName(), subdir,
+          "dir1/" + subdir.getName()));
+    }
+    String deletedDirKey = OMRequestTestUtils.deleteDir(dirKey, volumeName, bucketInfo.getBucketName(),
+        omMetadataManager);
+    for (String subDirKey : subDirKeys) {
+      assertTrue(omMetadataManager.getDirectoryTable().isExist(subDirKey));
+    }
+    for (String subFileKey : subFileKeys) {
+      assertTrue(omMetadataManager.getFileTable().isExist(subFileKey));
+    }
+    assertFalse(omMetadataManager.getDirectoryTable().isExist(dirKey));
+    Long volumeId = omMetadataManager.getVolumeId(bucketInfo.getVolumeName());
+    long bucketId = bucketInfo.getObjectID();
+    return wrapPurgeRequest(volumeId, bucketId, deletedDirKey, subFiles, subDirs);
+  }
+
+  @Test
+  public void testBucketLockWithPurgeDirectory() throws Exception {
+    when(ozoneManager.getDefaultReplicationConfig())
+        .thenReturn(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
+    Random random = new Random();
+    String bucket1 = "bucket" + random.nextInt();
+    // Add volume, bucket and key entries to OM DB.
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucket1,
+        omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED);
+    String bucketKey1 = omMetadataManager.getBucketKey(volumeName, bucket1);
+    OmBucketInfo bucketInfo1 = omMetadataManager.getBucketTable().get(bucketKey1);
+    PurgePathRequest purgePathRequest1 = createBucketDataAndGetPurgePathRequest(bucketInfo1);
+    String bucket2 = "bucket" + random.nextInt();
+    // Add volume, bucket and key entries to OM DB.
+    OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucket2,
+        omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED);
+    String bucketKey2 = omMetadataManager.getBucketKey(volumeName, bucket2);
+    OmBucketInfo bucketInfo2 = omMetadataManager.getBucketTable().get(bucketKey2);
+    PurgePathRequest purgePathRequest2 = createBucketDataAndGetPurgePathRequest(bucketInfo2);
+    IOzoneManagerLock lock = spy(omMetadataManager.getLock());
+    Set<Long> acquiredLockIds = new ConcurrentSkipListSet<>();
+    Set<String> acquiredLockKeys = new ConcurrentSkipListSet<>();
+    doAnswer(i -> {
+      long threadId = Thread.currentThread().getId();
+      GenericTestUtils.waitFor(() -> !acquiredLockIds.contains(threadId) || acquiredLockIds.size() == 2, 1000, 30000);
+      OMLockDetails lockDetails = (OMLockDetails) i.callRealMethod();
+      acquiredLockIds.add(threadId);
+      acquiredLockKeys.add(i.getArgument(1) + "/" + i.getArgument(2));
+      return lockDetails;
+    }).when(lock).acquireWriteLock(eq(BUCKET_LOCK), anyString(), anyString());
+
+    doAnswer(i -> {
+      long threadId = Thread.currentThread().getId();
+      GenericTestUtils.waitFor(() -> !acquiredLockIds.contains(threadId) || acquiredLockIds.size() == 2, 1000, 30000);
+      OMLockDetails lockDetails = (OMLockDetails) i.callRealMethod();
+      acquiredLockIds.add(threadId);
+      for (String[] lockKey : (List<String[]>) i.getArgument(1)) {
+        acquiredLockKeys.add(lockKey[0] + "/" + lockKey[1]);
+      }
+      return lockDetails;
+    }).when(lock).acquireWriteLocks(eq(BUCKET_LOCK), anyCollection());
+    when(omMetadataManager.getLock()).thenReturn(lock);
+    OMDirectoriesPurgeRequestWithFSO purgePathRequests1 = new OMDirectoriesPurgeRequestWithFSO(
+        preExecute(createPurgeKeysRequest(null, Arrays.asList(purgePathRequest1, purgePathRequest2))));
+    OMDirectoriesPurgeRequestWithFSO purgePathRequests2 = new OMDirectoriesPurgeRequestWithFSO(
+        preExecute(createPurgeKeysRequest(null, Arrays.asList(purgePathRequest2, purgePathRequest1))));
+    CompletableFuture future1 = CompletableFuture.runAsync(() -> purgePathRequests1.validateAndUpdateCache(ozoneManager,
+        100L));
+    CompletableFuture future2 = CompletableFuture.runAsync(() -> purgePathRequests2.validateAndUpdateCache(ozoneManager,
+        100L));
+    future1.get();
+    future2.get();
+    assertEquals(Stream.of(bucketInfo1.getVolumeName() + "/" + bucketInfo1.getBucketName(),
+        bucketInfo2.getVolumeName() + "/" + bucketInfo2.getBucketName()).collect(Collectors.toSet()),
+        acquiredLockKeys);
+    reset(lock);
Review Comment:
`reset(lock)` in `finally`?
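E.g. a sketch of what I mean, so the spy's stubbing cannot leak into other tests even when an assertion fails:

```java
IOzoneManagerLock lock = spy(omMetadataManager.getLock());
try {
  // ... stubbing, purge requests and assertions from the test body above ...
} finally {
  // always undo the spy's stubbing, even if an assertion above throws
  reset(lock);
}
```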
##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java:
##########
@@ -262,13 +264,18 @@ void optimizeDirDeletesAndSubmitRequest(
       long remainingBufLimit, KeyManager keyManager,
       CheckedFunction<KeyValue<String, OmKeyInfo>, Boolean, IOException> reclaimableDirChecker,
       CheckedFunction<KeyValue<String, OmKeyInfo>, Boolean, IOException> reclaimableFileChecker,
+      Collection<BucketNameInfo> bucketInfos,
       UUID expectedPreviousSnapshotId, long rnCnt) throws InterruptedException {
     // Optimization to handle delete sub-dir and keys to remove quickly
     // This case will be useful to handle when depth of directory is high
     int subdirDelNum = 0;
     int subDirRecursiveCnt = 0;
     int consumedSize = 0;
+    Map<Pair<Long, Long>, BucketNameInfo> bucketNameInfoMap =
+        bucketInfos.stream().collect(Collectors.toMap(
+            bucketInfo -> Pair.of(bucketInfo.getVolumeId(), bucketInfo.getBucketId()),
Review Comment:
Please use `VolumeBucketId`.
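Something like this (sketch; the value mapper is cut off in the hunk above, so identity is assumed, and `VolumeBucketId` is assumed to implement `equals`/`hashCode`):

```java
Map<VolumeBucketId, BucketNameInfo> bucketNameInfoMap =
    bucketInfos.stream().collect(Collectors.toMap(
        // key by the dedicated VolumeBucketId type instead of a raw Pair
        bucketInfo -> new VolumeBucketId(bucketInfo.getVolumeId(), bucketInfo.getBucketId()),
        bucketInfo -> bucketInfo));
```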
##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java:
##########
@@ -544,6 +544,19 @@ public String getBucketKeyPrefixFSO(String volume, String bucket) throws IOExcep
     return getOzoneKeyFSO(volume, bucket, OM_KEY_PREFIX);
   }
+  /**
+   * {@inheritDoc}
+   */
+  @Override
+  public VolumeBucketId getVolumeBucketIdPairFSO(String fsoKey) throws IOException {
+    String[] keySplit = fsoKey.split(OM_KEY_PREFIX);
+    try {
+      return new VolumeBucketId(Long.parseLong(keySplit[1]), Long.parseLong(keySplit[2]));
+    } catch (NumberFormatException e) {
+      throw new IOException("Invalid format for FSO Key: " + fsoKey, e);
+    }
+  }
Review Comment:
On another look, why is this being added to the `OMMetadataManager` interface in the first place? It looks like a small utility method that doesn't access any storage.
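E.g. it could be a static helper in a utility class instead (sketch; name and placement are just a suggestion):

```java
// pure string parsing, no OM metadata access needed
public static VolumeBucketId getVolumeBucketIdFromFSOKey(String fsoKey) throws IOException {
  // expected FSO key format: /<volumeId>/<bucketId>/...
  String[] keySplit = fsoKey.split(OM_KEY_PREFIX);
  try {
    return new VolumeBucketId(Long.parseLong(keySplit[1]), Long.parseLong(keySplit[2]));
  } catch (NumberFormatException e) {
    throw new IOException("Invalid format for FSO Key: " + fsoKey, e);
  }
}
```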
##########
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/service/DirectoryDeletingService.java:
##########
@@ -613,12 +623,27 @@ private boolean processDeletedDirectories(SnapshotInfo currentSnapshotInfo, KeyM
     long subFileNum = 0L;
     int consumedSize = 0;
     List<PurgePathRequest> purgePathRequestList = new ArrayList<>();
+    Map<Pair<String, String>, BucketNameInfo> bucketNameInfos = new HashMap<>();
+
     List<Pair<String, OmKeyInfo>> allSubDirList = new ArrayList<>();
     while (remainingBufLimit > 0) {
       KeyValue<String, OmKeyInfo> pendingDeletedDirInfo =
           dirSupplier.get();
       if (pendingDeletedDirInfo == null) {
         break;
       }
+      Pair<String, String> volumeBucketPair = Pair.of(pendingDeletedDirInfo.getValue().getVolumeName(),
+          pendingDeletedDirInfo.getValue().getBucketName());
Review Comment:
Please add local var for `pendingDeletedDirInfo.getValue()` to make the code
more readable.
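E.g. (sketch; variable name just a suggestion):

```java
OmKeyInfo deletedDirInfo = pendingDeletedDirInfo.getValue();
Pair<String, String> volumeBucketPair = Pair.of(deletedDirInfo.getVolumeName(),
    deletedDirInfo.getBucketName());
```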
##########
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMDirectoriesPurgeRequestAndResponse.java:
##########
@@ -200,6 +220,120 @@ private OMRequest preExecute(OMRequest originalOmRequest)
       throws IOException {
     return modifiedOmRequest;
   }
+  private PurgePathRequest createBucketDataAndGetPurgePathRequest(OmBucketInfo bucketInfo) throws Exception {
+    OmDirectoryInfo dir1 = new OmDirectoryInfo.Builder()
+        .setName("dir1")
+        .setCreationTime(Time.now())
+        .setModificationTime(Time.now())
+        .setObjectID(1)
+        .setParentObjectID(bucketInfo.getObjectID())
+        .setUpdateID(0)
+        .build();
+    String dirKey = OMRequestTestUtils.addDirKeyToDirTable(false, dir1, volumeName,
+        bucketInfo.getBucketName(), 1L, omMetadataManager);
+    List<OmKeyInfo> subFiles = new ArrayList<>();
+    List<OmKeyInfo> subDirs = new ArrayList<>();
+    List<String> subFileKeys = new ArrayList<>();
+    List<String> subDirKeys = new ArrayList<>();
+    for (int id = 1; id < 10; id++) {
+      OmDirectoryInfo subdir = new OmDirectoryInfo.Builder()
+          .setName("subdir" + id)
+          .setCreationTime(Time.now())
+          .setModificationTime(Time.now())
+          .setObjectID(2 * id)
+          .setParentObjectID(dir1.getObjectID())
+          .setUpdateID(0)
+          .build();
+      String subDirectoryPath = OMRequestTestUtils.addDirKeyToDirTable(false, subdir, volumeName,
+          bucketInfo.getBucketName(), 2 * id, omMetadataManager);
+      subDirKeys.add(subDirectoryPath);
+      OmKeyInfo subFile =
+          OMRequestTestUtils.createOmKeyInfo(volumeName, bucketInfo.getBucketName(), "file" + id,
+              RatisReplicationConfig.getInstance(ONE))
+          .setObjectID(2 * id + 1)
+          .setParentObjectID(dir1.getObjectID())
+          .setUpdateID(100L)
+          .build();
+      String subFilePath = OMRequestTestUtils.addFileToKeyTable(false, true, subFile.getKeyName(),
+          subFile, 1234L, 2 * id + 1, omMetadataManager);
+      subFileKeys.add(subFilePath);
+      subFile.setKeyName("dir1/" + subFile.getKeyName());
+      subFiles.add(subFile);
+      subDirs.add(getOmKeyInfo(volumeName, bucketInfo.getBucketName(), subdir,
+          "dir1/" + subdir.getName()));
+    }
+    String deletedDirKey = OMRequestTestUtils.deleteDir(dirKey, volumeName, bucketInfo.getBucketName(),
+        omMetadataManager);
+    for (String subDirKey : subDirKeys) {
+      assertTrue(omMetadataManager.getDirectoryTable().isExist(subDirKey));
+    }
+    for (String subFileKey : subFileKeys) {
+      assertTrue(omMetadataManager.getFileTable().isExist(subFileKey));
+    }
+    assertFalse(omMetadataManager.getDirectoryTable().isExist(dirKey));
+    Long volumeId = omMetadataManager.getVolumeId(bucketInfo.getVolumeName());
+    long bucketId = bucketInfo.getObjectID();
+    return wrapPurgeRequest(volumeId, bucketId, deletedDirKey, subFiles, subDirs);
+  }
+
+  @Test
+  public void testBucketLockWithPurgeDirectory() throws Exception {
+    when(ozoneManager.getDefaultReplicationConfig())
+        .thenReturn(RatisReplicationConfig.getInstance(HddsProtos.ReplicationFactor.THREE));
+    Random random = new Random();
+    String bucket1 = "bucket" + random.nextInt();
Review Comment:
`Please use `RandomUtils.secure().randomInt()` instead of creating a `new Random()` instance.
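I.e. (with `org.apache.commons.lang3.RandomUtils`):

```java
String bucket1 = "bucket" + RandomUtils.secure().randomInt();
```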
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]