This is an automated email from the ASF dual-hosted git repository.
weichiu pushed a commit to branch HDDS-7593
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/HDDS-7593 by this push:
new 061c0a813e HDDS-10649. Handle auto hsync commit key having
directories. (#6530)
061c0a813e is described below
commit 061c0a813e96388aae6ee6bd183efbaf9d2a597b
Author: Ashish Kumar <[email protected]>
AuthorDate: Wed May 1 04:22:52 2024 +0530
HDDS-10649. Handle auto hsync commit key having directories. (#6530)
Co-authored-by: ashishk <[email protected]>
---
.../rpc/TestOzoneClientMultipartUploadWithFSO.java | 5 +-
.../hadoop/ozone/om/TestObjectStoreWithFSO.java | 5 +-
.../hadoop/ozone/om/OmMetadataManagerImpl.java | 5 +-
.../request/file/OMFileCreateRequestWithFSO.java | 2 +-
.../ozone/om/request/file/OMFileRequest.java | 10 +--
.../request/key/OMAllocateBlockRequestWithFSO.java | 6 +-
.../om/request/key/OMKeyCommitRequestWithFSO.java | 4 +-
.../om/request/key/OMKeyCreateRequestWithFSO.java | 2 +-
.../S3InitiateMultipartUploadRequestWithFSO.java | 2 +-
...estS3InitiateMultipartUploadRequestWithFSO.java | 3 +-
.../om/service/TestOpenKeyCleanupService.java | 71 ++++++++++++++++++----
11 files changed, 82 insertions(+), 33 deletions(-)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
index 1e75a4d10a..b943930f62 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadWithFSO.java
@@ -50,13 +50,13 @@ import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartCommitUploadPartInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartInfo;
import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OmMultipartUploadCompleteInfo;
-import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import static java.nio.charset.StandardCharsets.UTF_8;
import static org.apache.hadoop.hdds.StringUtils.string2Bytes;
@@ -874,7 +874,8 @@ public class TestOzoneClientMultipartUploadWithFSO {
assertNotNull(omKeyInfo);
assertNotNull(omMultipartKeyInfo);
- assertEquals(OzoneFSUtils.getFileName(keyName), omKeyInfo.getKeyName());
+ assertEquals(keyName, omKeyInfo.getKeyName());
+ assertEquals(OzoneFSUtils.getFileName(keyName), omKeyInfo.getFileName());
assertEquals(uploadID, omMultipartKeyInfo.getUploadID());
for (OzoneManagerProtocolProtos.PartKeyInfo partKeyInfo :
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
index 2e58b6dbb7..5e3a3aa198 100644
---
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestObjectStoreWithFSO.java
@@ -820,10 +820,7 @@ public class TestObjectStoreWithFSO {
} else {
OmKeyInfo omKeyInfo = openFileTable.get(dbOpenFileKey);
assertNotNull(omKeyInfo, "Table is empty!");
- // used startsWith because the key format is,
- // <parentID>/fileName/<clientID> and clientID is not visible.
- assertEquals(omKeyInfo.getKeyName(), fileName,
- "Invalid Key: " + omKeyInfo.getObjectInfo());
+ assertEquals(omKeyInfo.getFileName(), fileName, "Invalid file name: " +
omKeyInfo.getObjectInfo());
assertEquals(parentID, omKeyInfo.getParentObjectID(), "Invalid Key");
}
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index 5e68359193..b5ea6d429a 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -1848,10 +1848,13 @@ public class OmMetadataManagerImpl implements
OMMetadataManager,
!openKeyInfo.getMetadata().containsKey(OzoneConsts.LEASE_RECOVERY)) {
// add hsync'ed keys
final OmKeyInfo info = kt.get(dbKeyName);
+      // Set keyName from the openFileTable entry, whose keyName holds the full
path (e.g. /a/b/c/d/e/file1),
+      // as required by the commit key request.
+      // The fileTable, by contrast, stores only the leaf name (e.g. file1) in keyName, and
so it cannot be used in the commit request.
final KeyArgs.Builder keyArgs = KeyArgs.newBuilder()
.setVolumeName(info.getVolumeName())
.setBucketName(info.getBucketName())
- .setKeyName(info.getKeyName())
+ .setKeyName(openKeyInfo.getKeyName())
.setDataSize(info.getDataSize());
java.util.Optional.ofNullable(info.getLatestVersionLocations())
.map(OmKeyLocationInfoGroup::getLocationList)
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
index 6910061c77..8fe6d3381b 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestWithFSO.java
@@ -201,7 +201,7 @@ public class OMFileCreateRequestWithFSO extends
OMFileCreateRequest {
// Even if bucket gets deleted, when commitKey we shall identify if
// bucket gets deleted.
OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
- dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(),
+ dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(),
keyName,
trxnLogIndex);
// Add cache entries for the prefix directories.
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
index a1e660691c..65bf355bc4 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java
@@ -472,15 +472,17 @@ public final class OMFileRequest {
*/
public static void addOpenFileTableCacheEntry(
OMMetadataManager omMetadataManager, String dbOpenFileName,
- @Nullable OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) {
+ @Nullable OmKeyInfo omFileInfo, String fileName, String keyName,
long trxnLogIndex) {
final Table<String, OmKeyInfo> table = omMetadataManager.getOpenKeyTable(
BucketLayout.FILE_SYSTEM_OPTIMIZED);
if (omFileInfo != null) {
- // New key format for the openFileTable.
// For example, the user given key path is '/a/b/c/d/e/file1', then in DB
- // keyName field stores only the leaf node name, which is 'file1'.
- omFileInfo.setKeyName(fileName);
+    // keyName field stores the full path, e.g. '/a/b/c/d/e/file1'.
+    // This is required because in some cases, such as hsync, keys inside the
openKeyTable are used for auto commit after expiry.
+    // (The full key path is required in the commit key request.)
+ omFileInfo.setKeyName(keyName);
+    // fileName will contain only the leaf (file1), which is the actual file name.
omFileInfo.setFileName(fileName);
table.addCacheEntry(dbOpenFileName, omFileInfo, trxnLogIndex);
} else {
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
index 4f0c9fe602..838f3ff5a1 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMAllocateBlockRequestWithFSO.java
@@ -160,7 +160,7 @@ public class OMAllocateBlockRequestWithFSO extends
OMAllocateBlockRequest {
openKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
// Add to cache.
- addOpenTableCacheEntry(trxnLogIndex, omMetadataManager, openKeyName,
+ addOpenTableCacheEntry(trxnLogIndex, omMetadataManager, openKeyName,
keyName,
openKeyInfo);
omResponse.setAllocateBlockResponse(AllocateBlockResponse.newBuilder()
@@ -215,11 +215,11 @@ public class OMAllocateBlockRequestWithFSO extends
OMAllocateBlockRequest {
}
private void addOpenTableCacheEntry(long trxnLogIndex,
- OMMetadataManager omMetadataManager, String openKeyName,
+ OMMetadataManager omMetadataManager, String openKeyName, String keyName,
OmKeyInfo openKeyInfo) {
String fileName = openKeyInfo.getFileName();
OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager, openKeyName,
- openKeyInfo, fileName, trxnLogIndex);
+ openKeyInfo, fileName, keyName, trxnLogIndex);
}
@Nonnull
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
index 0362f068e8..dd90e7ad89 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestWithFSO.java
@@ -274,7 +274,7 @@ public class OMKeyCommitRequestWithFSO extends
OMKeyCommitRequest {
// indicating the key is removed from OpenKeyTable.
// So that this key can't be committed again.
OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
- dbOpenFileKey, null, fileName, trxnLogIndex);
+ dbOpenFileKey, null, fileName, keyName, trxnLogIndex);
// Prevent hsync metadata from getting committed to the final key
omKeyInfo.getMetadata().remove(OzoneConsts.HSYNC_CLIENT_ID);
@@ -284,7 +284,7 @@ public class OMKeyCommitRequestWithFSO extends
OMKeyCommitRequest {
} else if (newOpenKeyInfo != null) {
// isHSync is true and newOpenKeyInfo is set, update OpenKeyTable
OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
- dbOpenFileKey, newOpenKeyInfo, fileName, trxnLogIndex);
+ dbOpenFileKey, newOpenKeyInfo, fileName, keyName, trxnLogIndex);
}
OMFileRequest.addFileTableCacheEntry(omMetadataManager, dbFileKey,
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
index 6fe8c12085..07f173bfd8 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCreateRequestWithFSO.java
@@ -186,7 +186,7 @@ public class OMKeyCreateRequestWithFSO extends
OMKeyCreateRequest {
// Even if bucket gets deleted, when commitKey we shall identify if
// bucket gets deleted.
OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
- dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(),
+ dbOpenFileName, omFileInfo, pathInfoFSO.getLeafNodeName(),
keyName,
trxnLogIndex);
// Add cache entries for the prefix directories.
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
index d1c865fbc7..6c30f8b816 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3InitiateMultipartUploadRequestWithFSO.java
@@ -202,7 +202,7 @@ public class S3InitiateMultipartUploadRequestWithFSO
missingParentInfos, null);
OMFileRequest.addOpenFileTableCacheEntry(omMetadataManager,
- multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(),
+ multipartOpenKey, omKeyInfo, pathInfoFSO.getLeafNodeName(), keyName,
transactionLogIndex);
// Add to cache
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
index cbdea75720..783662f04d 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/s3/multipart/TestS3InitiateMultipartUploadRequestWithFSO.java
@@ -98,8 +98,7 @@ public class TestS3InitiateMultipartUploadRequestWithFSO
assertTrue(
omKeyInfo.getLatestVersionLocations().isMultipartKey(),
"isMultipartKey is false!");
- assertEquals(fileName, omKeyInfo.getKeyName(),
- "FileName mismatches!");
+ assertEquals(fileName, omKeyInfo.getFileName(), "FileName mismatches!");
assertEquals(parentID, omKeyInfo.getParentObjectID(),
"ParentId mismatches!");
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
index aa51e2be7b..a656fa1b08 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/service/TestOpenKeyCleanupService.java
@@ -171,8 +171,8 @@ class TestOpenKeyCleanupService {
long numOpenKeysCleaned = metrics.getNumOpenKeysCleaned();
long numOpenKeysHSyncCleaned = metrics.getNumOpenKeysHSyncCleaned();
final int keyCount = numDEFKeys + numFSOKeys;
- createOpenKeys(numDEFKeys, false, BucketLayout.DEFAULT, false);
- createOpenKeys(numFSOKeys, hsync, BucketLayout.FILE_SYSTEM_OPTIMIZED,
false);
+ createOpenKeys(numDEFKeys, false, BucketLayout.DEFAULT, false, false);
+ createOpenKeys(numFSOKeys, hsync, BucketLayout.FILE_SYSTEM_OPTIMIZED,
false, false);
// wait for open keys to expire
Thread.sleep(EXPIRE_THRESHOLD_MS);
@@ -239,9 +239,9 @@ class TestOpenKeyCleanupService {
when(om.getScmClient().getContainerClient().getContainerWithPipeline(anyLong()))
.thenReturn(new
ContainerWithPipeline(Mockito.mock(ContainerInfo.class), pipeline));
- createOpenKeys(keyCount, true, BucketLayout.FILE_SYSTEM_OPTIMIZED, false);
+ createOpenKeys(keyCount, true, BucketLayout.FILE_SYSTEM_OPTIMIZED, false,
false);
// create 2 more keys and set the recovery flag
- createOpenKeys(2, true, BucketLayout.FILE_SYSTEM_OPTIMIZED, true);
+ createOpenKeys(2, true, BucketLayout.FILE_SYSTEM_OPTIMIZED, true, false);
// wait for open keys to expire
Thread.sleep(EXPIRE_THRESHOLD_MS);
@@ -257,7 +257,51 @@ class TestOpenKeyCleanupService {
waitForOpenKeyCleanup(true, BucketLayout.FILE_SYSTEM_OPTIMIZED);
// 2 keys should still remain in openKey table
- assertEquals(2, getOpenKeyInfo(BucketLayout.FILE_SYSTEM_OPTIMIZED).size());
+ assertEquals(2, getKeyInfo(BucketLayout.FILE_SYSTEM_OPTIMIZED,
true).size());
+ }
+
+ @Test
+ public void testCommitExpiredHsyncKeys() throws Exception {
+ OpenKeyCleanupService openKeyCleanupService =
+ (OpenKeyCleanupService) keyManager.getOpenKeyCleanupService();
+
+ openKeyCleanupService.suspend();
+ // wait for submitted tasks to complete
+ Thread.sleep(SERVICE_INTERVAL);
+
+ int keyCount = 10;
+ Pipeline pipeline = Pipeline.newBuilder()
+ .setState(Pipeline.PipelineState.OPEN)
+ .setId(PipelineID.randomId())
+ .setReplicationConfig(
+
StandaloneReplicationConfig.getInstance(HddsProtos.ReplicationFactor.ONE))
+ .setNodes(new ArrayList<>())
+ .build();
+
+
when(om.getScmClient().getContainerClient().getContainerWithPipeline(anyLong()))
+ .thenReturn(new
ContainerWithPipeline(Mockito.mock(ContainerInfo.class), pipeline));
+
+ // Create 5 keys with directories
+ createOpenKeys(keyCount / 2, true, BucketLayout.FILE_SYSTEM_OPTIMIZED,
false, true);
+ // Create 5 keys without directory
+ createOpenKeys(keyCount / 2, true, BucketLayout.FILE_SYSTEM_OPTIMIZED,
false, false);
+
+ // wait for open keys to expire
+ Thread.sleep(EXPIRE_THRESHOLD_MS);
+
+ // 10 keys should be returned after hard limit period
+ assertEquals(keyCount, getExpiredOpenKeys(true,
BucketLayout.FILE_SYSTEM_OPTIMIZED));
+ assertExpiredOpenKeys(false, true,
+ BucketLayout.FILE_SYSTEM_OPTIMIZED);
+
+ openKeyCleanupService.resume();
+
+ // keys should be recovered and there should not be any expired key pending
+ waitForOpenKeyCleanup(true, BucketLayout.FILE_SYSTEM_OPTIMIZED);
+
+ List<OmKeyInfo> listKeyInfo =
getKeyInfo(BucketLayout.FILE_SYSTEM_OPTIMIZED, false);
+    // Verify that keyName and fileName are the same after the key is auto committed.
+ listKeyInfo.stream().forEach(key -> assertEquals(key.getKeyName(),
key.getFileName()));
}
/**
@@ -408,17 +452,20 @@ class TestOpenKeyCleanupService {
}
}
- private List<OmKeyInfo> getOpenKeyInfo(BucketLayout bucketLayout) {
+ private List<OmKeyInfo> getKeyInfo(BucketLayout bucketLayout, boolean
openKey) {
List<OmKeyInfo> omKeyInfo = new ArrayList<>();
- Table<String, OmKeyInfo> openFileTable =
- om.getMetadataManager().getOpenKeyTable(bucketLayout);
+ Table<String, OmKeyInfo> fileTable;
+ if (openKey) {
+ fileTable = om.getMetadataManager().getOpenKeyTable(bucketLayout);
+ } else {
+ fileTable = om.getMetadataManager().getKeyTable(bucketLayout);
+ }
try (TableIterator<String, ? extends Table.KeyValue<String, OmKeyInfo>>
- iterator = openFileTable.iterator()) {
+ iterator = fileTable.iterator()) {
while (iterator.hasNext()) {
omKeyInfo.add(iterator.next().getValue());
}
-
} catch (Exception e) {
}
return omKeyInfo;
@@ -432,7 +479,7 @@ class TestOpenKeyCleanupService {
}
private void createOpenKeys(int keyCount, boolean hsync,
- BucketLayout bucketLayout, boolean recovery) throws IOException {
+ BucketLayout bucketLayout, boolean recovery, boolean withDir) throws
IOException {
String volume = UUID.randomUUID().toString();
String bucket = UUID.randomUUID().toString();
for (int x = 0; x < keyCount; x++) {
@@ -442,7 +489,7 @@ class TestOpenKeyCleanupService {
volume = UUID.randomUUID().toString();
}
}
- String key = UUID.randomUUID().toString();
+ String key = withDir ? "dir1/dir2/" + UUID.randomUUID() :
UUID.randomUUID().toString();
createVolumeAndBucket(volume, bucket, bucketLayout);
final int numBlocks = RandomUtils.nextInt(1, 3);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]