This is an automated email from the ASF dual-hosted git repository.
rakeshr pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new e5c647e HDDS-5370. [FSO] Handle OMClientRequest based on the bucket
layout. (#2533)
e5c647e is described below
commit e5c647eb65b896d629f8e20e066cccae32834d80
Author: Aryan Gupta <[email protected]>
AuthorDate: Fri Sep 24 10:30:45 2021 +0530
HDDS-5370. [FSO] Handle OMClientRequest based on the bucket layout. (#2533)
---
.../om/ratis/TestOzoneManagerRatisRequest.java | 71 +++++++++++
.../hadoop/ozone/om/DirectoryDeletingService.java | 21 ++--
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 47 +++++---
.../hadoop/ozone/om/OmMetadataManagerImpl.java | 4 +
.../hadoop/ozone/om/TrashOzoneFileSystem.java | 2 +-
.../om/ratis/utils/OzoneManagerRatisUtils.java | 130 +++++++++++++++++----
.../ozone/om/request/key/OMKeyDeleteRequest.java | 58 +++++----
.../ozone/om/request/key/acl/OMKeyAclRequest.java | 38 ++++++
...OzoneManagerProtocolServerSideTranslatorPB.java | 8 +-
.../protocolPB/OzoneManagerRequestHandler.java | 12 +-
.../ozone/om/request/TestOMRequestUtils.java | 22 +++-
.../om/request/key/TestOMKeyDeleteRequest.java | 13 ++-
.../request/key/TestOMKeyDeleteRequestWithFSO.java | 10 +-
13 files changed, 345 insertions(+), 91 deletions(-)
diff --git
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java
new file mode 100644
index 0000000..0c5431e
--- /dev/null
+++
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/ratis/TestOzoneManagerRatisRequest.java
@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.om.ratis;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.OMConfigKeys;
+import org.apache.hadoop.ozone.om.OMMetadataManager;
+import org.apache.hadoop.ozone.om.OmMetadataManagerImpl;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.request.OMClientRequest;
+import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
+import
org.apache.hadoop.ozone.om.request.s3.multipart.S3MultipartUploadCompleteRequest;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
+import org.junit.Assert;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import org.mockito.Mockito;
+
+import java.util.ArrayList;
+
+import static org.mockito.Mockito.when;
+
+/**
+ * Test: Creating a client request for a bucket which doesn't exist.
+ */
+public class TestOzoneManagerRatisRequest {
+ @Rule public TemporaryFolder folder = new TemporaryFolder();
+
+ private OzoneManager ozoneManager;
+ private OzoneConfiguration ozoneConfiguration = new OzoneConfiguration();
+ private OMMetadataManager omMetadataManager;
+
+ @Test(timeout = 300_000)
+ public void testRequestWithNonExistentBucket()
+ throws Exception {
+ ozoneManager = Mockito.mock(OzoneManager.class);
+ ozoneConfiguration.set(OMConfigKeys.OZONE_OM_DB_DIRS,
+ folder.newFolder().getAbsolutePath());
+ omMetadataManager = new OmMetadataManagerImpl(ozoneConfiguration);
+ when(ozoneManager.getMetadataManager()).thenReturn(omMetadataManager);
+
+ String volumeName = "vol1";
+ String bucketName = "invalidBuck";
+ OzoneManagerProtocolProtos.OMRequest omRequest = TestOMRequestUtils
+ .createCompleteMPURequest(volumeName, bucketName, "mpuKey", "mpuKeyID",
+ new ArrayList<>());
+
+ OMClientRequest req =
+ OzoneManagerRatisUtils.createClientRequest(omRequest, ozoneManager);
+ Assert.assertNotNull(req);
+ Assert.assertTrue("Unexpected request on invalid bucket",
+ req instanceof S3MultipartUploadCompleteRequest);
+ }
+}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DirectoryDeletingService.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DirectoryDeletingService.java
index a8d66cd..8855f85 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DirectoryDeletingService.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/DirectoryDeletingService.java
@@ -18,6 +18,7 @@ package org.apache.hadoop.ozone.om;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.ServiceException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.utils.BackgroundService;
import org.apache.hadoop.hdds.utils.BackgroundTask;
import org.apache.hadoop.hdds.utils.BackgroundTaskQueue;
@@ -59,7 +60,6 @@ import static
org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_PATH_DELETING_LIMIT_
*/
public class DirectoryDeletingService extends BackgroundService {
- private final KeyManager keyManager;
private final OzoneManager ozoneManager;
private AtomicLong deletedDirsCount;
private AtomicLong deletedFilesCount;
@@ -76,15 +76,15 @@ public class DirectoryDeletingService extends
BackgroundService {
private final long pathLimitPerTask;
public DirectoryDeletingService(long interval, TimeUnit unit,
- long serviceTimeout, OzoneManager ozoneManager) {
+ long serviceTimeout, OzoneManager ozoneManager,
+ OzoneConfiguration configuration) {
super("DirectoryDeletingService", interval, unit,
DIR_DELETING_CORE_POOL_SIZE, serviceTimeout);
- this.keyManager = ozoneManager.getKeyManager();
this.ozoneManager = ozoneManager;
this.deletedDirsCount = new AtomicLong(0);
this.deletedFilesCount = new AtomicLong(0);
this.runCount = new AtomicLong(0);
- this.pathLimitPerTask = ozoneManager.getConfiguration()
+ this.pathLimitPerTask = configuration
.getInt(OZONE_PATH_DELETING_LIMIT_PER_TASK,
OZONE_PATH_DELETING_LIMIT_PER_TASK_DEFAULT);
}
@@ -126,16 +126,16 @@ public class DirectoryDeletingService extends
BackgroundService {
try {
long startTime = Time.monotonicNow();
// step-1) Get one pending deleted directory
- OmKeyInfo pendingDeletedDirInfo = keyManager.getPendingDeletionDir();
+ OmKeyInfo pendingDeletedDirInfo =
+ ozoneManager.getKeyManager().getPendingDeletionDir();
if (pendingDeletedDirInfo != null) {
if (LOG.isDebugEnabled()) {
LOG.debug("Pending deleted dir name: {}",
pendingDeletedDirInfo.getKeyName());
}
// step-1: get all sub directories under the deletedDir
- List<OmKeyInfo> dirs =
- keyManager.getPendingDeletionSubDirs(pendingDeletedDirInfo,
- count);
+ List<OmKeyInfo> dirs = ozoneManager.getKeyManager()
+ .getPendingDeletionSubDirs(pendingDeletedDirInfo, count);
count = count - dirs.size();
List<OmKeyInfo> deletedSubDirList = new ArrayList<>();
for (OmKeyInfo dirInfo : dirs) {
@@ -147,9 +147,8 @@ public class DirectoryDeletingService extends
BackgroundService {
}
// step-2: get all sub files under the deletedDir
- List<OmKeyInfo> purgeDeletedFiles =
- keyManager.getPendingDeletionSubFiles(pendingDeletedDirInfo,
- count);
+ List<OmKeyInfo> purgeDeletedFiles = ozoneManager.getKeyManager()
+ .getPendingDeletionSubFiles(pendingDeletedDirInfo, count);
count = count - purgeDeletedFiles.size();
if (LOG.isDebugEnabled()) {
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 9a0fd5b..343b8fc 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -97,7 +97,7 @@ import org.apache.hadoop.ozone.om.helpers.OzoneAclUtil;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.ozone.om.request.file.OMFileRequest;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo;
@@ -181,7 +181,6 @@ public class KeyManagerImpl implements KeyManager {
private final boolean enableFileSystemPaths;
private BackgroundService dirDeletingService;
-
@VisibleForTesting
public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient,
OMMetadataManager metadataManager, OzoneConfiguration conf, String omId,
@@ -256,8 +255,7 @@ public class KeyManagerImpl implements KeyManager {
}
// Start directory deletion service for FSO buckets.
- if (OzoneManagerRatisUtils.isBucketFSOptimized()
- && dirDeletingService == null) {
+ if (dirDeletingService == null) {
long dirDeleteInterval = configuration.getTimeDuration(
OZONE_DIR_DELETING_SERVICE_INTERVAL,
OZONE_DIR_DELETING_SERVICE_INTERVAL_DEFAULT,
@@ -267,7 +265,7 @@ public class KeyManagerImpl implements KeyManager {
OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
TimeUnit.MILLISECONDS);
dirDeletingService = new DirectoryDeletingService(dirDeleteInterval,
- TimeUnit.SECONDS, serviceTimeout, ozoneManager);
+ TimeUnit.SECONDS, serviceTimeout, ozoneManager, configuration);
dirDeletingService.start();
}
}
@@ -649,7 +647,7 @@ public class KeyManagerImpl implements KeyManager {
bucketName);
OmKeyInfo value = null;
try {
- if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+ if (isBucketFSOptimized(volumeName, bucketName)) {
value = getOmKeyInfoFSO(volumeName, bucketName, keyName);
} else {
value = getOmKeyInfo(volumeName, bucketName, keyName);
@@ -1478,7 +1476,7 @@ public class KeyManagerImpl implements KeyManager {
if (replicationConfig == null) {
//if there are no parts, use the replicationType from the open key.
- if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+ if (isBucketFSOptimized(volumeName, bucketName)) {
multipartKey =
getMultipartOpenKeyFSO(volumeName, bucketName, keyName,
uploadID);
@@ -1526,11 +1524,12 @@ public class KeyManagerImpl implements KeyManager {
}
private String getPartName(PartKeyInfo partKeyInfo, String volName,
- String buckName, String keyName) {
+ String buckName, String keyName)
+ throws IOException {
String partName = partKeyInfo.getPartName();
- if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+ if (isBucketFSOptimized(volName, buckName)) {
String parentDir = OzoneFSUtils.getParentDir(keyName);
String partFileName =
OzoneFSUtils.getFileName(partKeyInfo.getPartName());
@@ -1712,7 +1711,7 @@ public class KeyManagerImpl implements KeyManager {
try {
OMFileRequest.validateBucket(metadataManager, volume, bucket);
String objectKey = metadataManager.getOzoneKey(volume, bucket, keyName);
- if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+ if (isBucketFSOptimized(volume, bucket)) {
keyInfo = getOmKeyInfoFSO(volume, bucket, keyName);
} else {
keyInfo = getOmKeyInfo(volume, bucket, keyName);
@@ -1925,7 +1924,7 @@ public class KeyManagerImpl implements KeyManager {
String bucketName = args.getBucketName();
String keyName = args.getKeyName();
- if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+ if (isBucketFSOptimized(volumeName, bucketName)) {
return getOzoneFileStatusFSO(volumeName, bucketName, keyName,
args.getSortDatanodes(), clientAddress,
args.getLatestVersionLocation(), false);
@@ -2202,7 +2201,7 @@ public class KeyManagerImpl implements KeyManager {
String bucketName = args.getBucketName();
String keyName = args.getKeyName();
OzoneFileStatus fileStatus;
- if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+ if (isBucketFSOptimized(volumeName, bucketName)) {
fileStatus = getOzoneFileStatusFSO(volumeName, bucketName, keyName,
args.getSortDatanodes(), clientAddress,
args.getLatestVersionLocation(), false);
@@ -2305,13 +2304,13 @@ public class KeyManagerImpl implements KeyManager {
String startKey, long numEntries, String clientAddress)
throws IOException {
Preconditions.checkNotNull(args, "Key args can not be null");
-
+ String volName = args.getVolumeName();
+ String buckName = args.getBucketName();
List<OzoneFileStatus> fileStatusList = new ArrayList<>();
if (numEntries <= 0) {
return fileStatusList;
}
-
- if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
+ if (isBucketFSOptimized(volName, buckName)) {
return listStatusFSO(args, recursive, startKey, numEntries,
clientAddress);
}
@@ -3093,4 +3092,22 @@ public class KeyManagerImpl implements KeyManager {
return files;
}
+
+ public boolean isBucketFSOptimized(String volName, String buckName)
+ throws IOException {
+ // This will never be null in reality but can be null in unit test cases.
+ // Added safer check for unit testcases.
+ if (ozoneManager == null) {
+ return false;
+ }
+ String buckKey =
+ ozoneManager.getMetadataManager().getBucketKey(volName, buckName);
+ OmBucketInfo buckInfo =
+ ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
+ if (buckInfo != null) {
+ return buckInfo.getBucketLayout()
+ .equals(BucketLayout.FILE_SYSTEM_OPTIMIZED);
+ }
+ return false;
+ }
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
index a854ca4..fdf1c61 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java
@@ -279,6 +279,8 @@ public class OmMetadataManagerImpl implements
OMMetadataManager {
@Override
public Table<String, OmKeyInfo> getKeyTable() {
+ // TODO: Refactor the below function by reading bucketLayout.
+ // Jira: HDDS-5636
if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
return fileTable;
}
@@ -302,6 +304,8 @@ public class OmMetadataManagerImpl implements
OMMetadataManager {
@Override
public Table<String, OmKeyInfo> getOpenKeyTable() {
+ // TODO: Refactor the below function by reading bucketLayout.
+ // Jira: HDDS-5636
if (OzoneManagerRatisUtils.isBucketFSOptimized()) {
return openFileTable;
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
index 43c23b9..3c1ce3d 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/TrashOzoneFileSystem.java
@@ -110,7 +110,7 @@ public class TrashOzoneFileSystem extends FileSystem {
ozoneManager.getMetrics().incNumTrashWriteRequests();
if (ozoneManager.isRatisEnabled()) {
OMClientRequest omClientRequest =
- OzoneManagerRatisUtils.createClientRequest(omRequest);
+ OzoneManagerRatisUtils.createClientRequest(omRequest, ozoneManager);
omRequest = omClientRequest.preExecute(ozoneManager);
RaftClientRequest req = getRatisRequest(omRequest);
ozoneManager.getOmRatisServer().submitRequest(omRequest, req);
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
index e655b28..25d4d0f 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/ratis/utils/OzoneManagerRatisUtils.java
@@ -31,6 +31,8 @@ import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.codec.OMDBDefinition;
import org.apache.hadoop.ozone.om.exceptions.OMException;
import org.apache.hadoop.hdds.utils.TransactionInfo;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.exceptions.OMLeaderNotReadyException;
import org.apache.hadoop.ozone.om.exceptions.OMNotLeaderException;
import
org.apache.hadoop.ozone.om.ratis.OzoneManagerRatisServer.RaftServerStatus;
@@ -140,8 +142,18 @@ public final class OzoneManagerRatisUtils {
* @return OMClientRequest
* @throws IOException
*/
- public static OMClientRequest createClientRequest(OMRequest omRequest) {
+ public static OMClientRequest createClientRequest(OMRequest omRequest,
+ OzoneManager ozoneManager) {
+
+ // Handling of exception by createClientRequest(OMRequest, OzoneManager):
+ // Either the code will take FSO or non FSO path, both classes have a
+ // validateAndUpdateCache() function which also contains
+ // validateBucketAndVolume() function which validates bucket and volume and
+ // throws necessary exceptions if required. validateAndUpdateCache()
+ // function has catch block which catches the exception if required and
+ // handles it appropriately.
Type cmdType = omRequest.getCmdType();
+ OzoneManagerProtocolProtos.KeyArgs keyArgs;
switch (cmdType) {
case CreateVolume:
return new OMVolumeCreateRequest(omRequest);
@@ -168,41 +180,55 @@ public final class OzoneManagerRatisUtils {
case SetBucketProperty:
return new OMBucketSetPropertyRequest(omRequest);
case AllocateBlock:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getAllocateBlockRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new OMAllocateBlockRequestWithFSO(omRequest);
}
return new OMAllocateBlockRequest(omRequest);
case CreateKey:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getCreateKeyRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new OMKeyCreateRequestWithFSO(omRequest);
}
return new OMKeyCreateRequest(omRequest);
case CommitKey:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getCommitKeyRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new OMKeyCommitRequestWithFSO(omRequest);
}
return new OMKeyCommitRequest(omRequest);
case DeleteKey:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getDeleteKeyRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new OMKeyDeleteRequestWithFSO(omRequest);
}
return new OMKeyDeleteRequest(omRequest);
case DeleteKeys:
return new OMKeysDeleteRequest(omRequest);
case RenameKey:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getRenameKeyRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new OMKeyRenameRequestWithFSO(omRequest);
}
return new OMKeyRenameRequest(omRequest);
case RenameKeys:
return new OMKeysRenameRequest(omRequest);
case CreateDirectory:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getCreateDirectoryRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new OMDirectoryCreateRequestWithFSO(omRequest);
}
return new OMDirectoryCreateRequest(omRequest);
case CreateFile:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getCreateFileRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new OMFileCreateRequestWithFSO(omRequest);
}
return new OMFileCreateRequest(omRequest);
@@ -211,29 +237,37 @@ public final class OzoneManagerRatisUtils {
case PurgePaths:
return new OMPathsPurgeRequestWithFSO(omRequest);
case InitiateMultiPartUpload:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getInitiateMultiPartUploadRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new S3InitiateMultipartUploadRequestWithFSO(omRequest);
}
return new S3InitiateMultipartUploadRequest(omRequest);
case CommitMultiPartUpload:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getCommitMultiPartUploadRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new S3MultipartUploadCommitPartRequestWithFSO(omRequest);
}
return new S3MultipartUploadCommitPartRequest(omRequest);
case AbortMultiPartUpload:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getAbortMultiPartUploadRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new S3MultipartUploadAbortRequestWithFSO(omRequest);
}
return new S3MultipartUploadAbortRequest(omRequest);
case CompleteMultiPartUpload:
- if (isBucketFSOptimized()) {
+ keyArgs = omRequest.getCompleteMultiPartUploadRequest().getKeyArgs();
+ if (isBucketFSOptimized(keyArgs.getVolumeName(), keyArgs.getBucketName(),
+ ozoneManager)) {
return new S3MultipartUploadCompleteRequestWithFSO(omRequest);
}
return new S3MultipartUploadCompleteRequest(omRequest);
case AddAcl:
case RemoveAcl:
case SetAcl:
- return getOMAclRequest(omRequest);
+ return getOMAclRequest(omRequest, ozoneManager);
case GetDelegationToken:
return new OMGetDelegationTokenRequest(omRequest);
case CancelDelegationToken:
@@ -258,7 +292,8 @@ public final class OzoneManagerRatisUtils {
}
}
- private static OMClientRequest getOMAclRequest(OMRequest omRequest) {
+ private static OMClientRequest getOMAclRequest(OMRequest omRequest,
+ OzoneManager ozoneManager) {
Type cmdType = omRequest.getCmdType();
if (Type.AddAcl == cmdType) {
ObjectType type = omRequest.getAddAclRequest().getObj().getResType();
@@ -267,10 +302,13 @@ public final class OzoneManagerRatisUtils {
} else if (ObjectType.BUCKET == type) {
return new OMBucketAddAclRequest(omRequest);
} else if (ObjectType.KEY == type) {
- if (isBucketFSOptimized()){
+ OMKeyAddAclRequest aclReq = new OMKeyAddAclRequest(omRequest);
+ BucketLayout bucketLayout = aclReq.getBucketLayout(ozoneManager);
+ if (BucketLayout.FILE_SYSTEM_OPTIMIZED
+ .equals(bucketLayout)) {
return new OMKeyAddAclRequestWithFSO(omRequest);
}
- return new OMKeyAddAclRequest(omRequest);
+ return aclReq;
} else {
return new OMPrefixAddAclRequest(omRequest);
}
@@ -281,10 +319,13 @@ public final class OzoneManagerRatisUtils {
} else if (ObjectType.BUCKET == type) {
return new OMBucketRemoveAclRequest(omRequest);
} else if (ObjectType.KEY == type) {
- if (isBucketFSOptimized()){
+ OMKeyRemoveAclRequest aclReq = new OMKeyRemoveAclRequest(omRequest);
+ BucketLayout bucketLayout = aclReq.getBucketLayout(ozoneManager);
+ if (BucketLayout.FILE_SYSTEM_OPTIMIZED
+ .equals(bucketLayout)) {
return new OMKeyRemoveAclRequestWithFSO(omRequest);
}
- return new OMKeyRemoveAclRequest(omRequest);
+ return aclReq;
} else {
return new OMPrefixRemoveAclRequest(omRequest);
}
@@ -295,10 +336,13 @@ public final class OzoneManagerRatisUtils {
} else if (ObjectType.BUCKET == type) {
return new OMBucketSetAclRequest(omRequest);
} else if (ObjectType.KEY == type) {
- if (isBucketFSOptimized()){
+ OMKeySetAclRequest aclReq = new OMKeySetAclRequest(omRequest);
+ BucketLayout bucketLayout = aclReq.getBucketLayout(ozoneManager);
+ if (BucketLayout.FILE_SYSTEM_OPTIMIZED
+ .equals(bucketLayout)) {
return new OMKeySetAclRequestWithFSO(omRequest);
}
- return new OMKeySetAclRequest(omRequest);
+ return aclReq;
} else {
return new OMPrefixSetAclRequest(omRequest);
}
@@ -437,4 +481,50 @@ public final class OzoneManagerRatisUtils {
return new ServiceException(leaderNotReadyException);
}
+ /**
+ * All the client requests are executed through
+ * OzoneManagerStateMachine#runCommand function and ensures sequential
+ * execution path.
+ * Below is the call trace to perform OM client request operation:
+ * OzoneManagerStateMachine#applyTransaction ->
+ * OzoneManagerStateMachine#runCommand ->
+ * OzoneManagerRequestHandler#handleWriteRequest ->
+ * OzoneManagerRatisUtils#createClientRequest ->
+ * OzoneManagerRatisUtils#getOmBucketInfo ->
+ * omMetadataManager().getBucketTable().get(buckKey)
+ */
+
+ private static OmBucketInfo getOmBucketInfo(OzoneManager ozoneManager,
+ OmBucketInfo buckInfo, String volName, String buckName) {
+ String buckKey =
+ ozoneManager.getMetadataManager().getBucketKey(volName, buckName);
+ try {
+ buckInfo =
+ ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
+ } catch (IOException e) {
+ LOG.debug("Failed to get the value for the key: " + buckKey);
+ }
+ return buckInfo;
+ }
+
+ private static BucketLayout getBucketLayout(OmBucketInfo buckInfo,
+ String volName, String buckName) {
+ if (buckInfo != null) {
+ return buckInfo.getBucketLayout();
+ } else {
+ LOG.error("Bucket not found: {}/{} ", volName, buckName);
+ // TODO: Handle bucket validation
+ }
+ return BucketLayout.LEGACY;
+ }
+
+ private static boolean isBucketFSOptimized(String volName, String buckName,
+ OzoneManager ozoneManager) {
+ BucketLayout bucketLayout = null;
+ OmBucketInfo buckInfo = null;
+ buckInfo = getOmBucketInfo(ozoneManager, buckInfo, volName, buckName);
+ bucketLayout = getBucketLayout(buckInfo, volName, buckName);
+ return BucketLayout.FILE_SYSTEM_OPTIMIZED.equals(bucketLayout);
+ }
+
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
index 8e8ed2b..7c1a641 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyDeleteRequest.java
@@ -22,6 +22,7 @@ import java.io.IOException;
import java.util.Map;
import com.google.common.base.Optional;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.util.OmResponseUtil;
@@ -86,10 +87,16 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
@SuppressWarnings("methodlength")
public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) {
+ return validateAndUpdateCache(ozoneManager, trxnLogIndex,
+ omDoubleBufferHelper, BucketLayout.DEFAULT);
+ }
+
+ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager,
+ long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper,
+ BucketLayout bucketLayout) {
DeleteKeyRequest deleteKeyRequest = getOmRequest().getDeleteKeyRequest();
- OzoneManagerProtocolProtos.KeyArgs keyArgs =
- deleteKeyRequest.getKeyArgs();
+ OzoneManagerProtocolProtos.KeyArgs keyArgs = deleteKeyRequest.getKeyArgs();
Map<String, String> auditMap = buildKeyArgsAuditMap(keyArgs);
String volumeName = keyArgs.getVolumeName();
@@ -102,14 +109,17 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
AuditLogger auditLogger = ozoneManager.getAuditLogger();
OzoneManagerProtocolProtos.UserInfo userInfo =
getOmRequest().getUserInfo();
- OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder(
- getOmRequest());
+ OMResponse.Builder omResponse =
+ OmResponseUtil.getOMResponseBuilder(getOmRequest());
OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager();
IOException exception = null;
boolean acquiredLock = false;
OMClientResponse omClientResponse = null;
Result result = null;
- OmBucketInfo omBucketInfo = null;
+ OmBucketInfo omBucketInfo =
+ OmBucketInfo.newBuilder().setVolumeName(volumeName)
+ .setBucketName(bucketName).setCreationTime(Time.now())
+ .setBucketLayout(bucketLayout).build();
try {
keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap);
volumeName = keyArgs.getVolumeName();
@@ -119,11 +129,11 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
checkKeyAcls(ozoneManager, volumeName, bucketName, keyName,
IAccessAuthorizer.ACLType.DELETE, OzoneObj.ResourceType.KEY);
- String objectKey = omMetadataManager.getOzoneKey(
- volumeName, bucketName, keyName);
+ String objectKey =
+ omMetadataManager.getOzoneKey(volumeName, bucketName, keyName);
- acquiredLock = omMetadataManager.getLock().acquireWriteLock(BUCKET_LOCK,
- volumeName, bucketName);
+ acquiredLock = omMetadataManager.getLock()
+ .acquireWriteLock(BUCKET_LOCK, volumeName, bucketName);
// Validate bucket and volume exists or not.
validateBucketAndVolume(omMetadataManager, volumeName, bucketName);
@@ -137,9 +147,8 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
omKeyInfo.setUpdateID(trxnLogIndex, ozoneManager.isRatisEnabled());
// Update table cache.
- omMetadataManager.getKeyTable().addCacheEntry(
- new CacheKey<>(omMetadataManager.getOzoneKey(volumeName, bucketName,
- keyName)),
+ omMetadataManager.getKeyTable().addCacheEntry(new CacheKey<>(
+ omMetadataManager.getOzoneKey(volumeName, bucketName, keyName)),
new CacheValue<>(Optional.absent(), trxnLogIndex));
omBucketInfo = getBucketInfo(omMetadataManager, volumeName, bucketName);
@@ -153,30 +162,29 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
// validation, so we don't need to add to cache.
// TODO: Revisit if we need it later.
- omClientResponse = new OMKeyDeleteResponse(omResponse
- .setDeleteKeyResponse(DeleteKeyResponse.newBuilder()).build(),
- omKeyInfo, ozoneManager.isRatisEnabled(),
+ omClientResponse = new OMKeyDeleteResponse(
+ omResponse.setDeleteKeyResponse(DeleteKeyResponse.newBuilder())
+ .build(), omKeyInfo, ozoneManager.isRatisEnabled(),
omBucketInfo.copyObject());
result = Result.SUCCESS;
} catch (IOException ex) {
result = Result.FAILURE;
exception = ex;
- omClientResponse = new OMKeyDeleteResponse(
- createErrorOMResponse(omResponse, exception));
+ omClientResponse =
+ new OMKeyDeleteResponse(createErrorOMResponse(omResponse,
exception));
} finally {
addResponseToDoubleBuffer(trxnLogIndex, omClientResponse,
- omDoubleBufferHelper);
+ omDoubleBufferHelper);
if (acquiredLock) {
- omMetadataManager.getLock().releaseWriteLock(BUCKET_LOCK, volumeName,
- bucketName);
+ omMetadataManager.getLock()
+ .releaseWriteLock(BUCKET_LOCK, volumeName, bucketName);
}
}
// Performing audit logging outside of the lock.
- auditLog(auditLogger, buildAuditMessage(OMAction.DELETE_KEY, auditMap,
- exception, userInfo));
-
+ auditLog(auditLogger,
+ buildAuditMessage(OMAction.DELETE_KEY, auditMap, exception, userInfo));
switch (result) {
case SUCCESS:
@@ -186,8 +194,8 @@ public class OMKeyDeleteRequest extends OMKeyRequest {
break;
case FAILURE:
omMetrics.incNumKeyDeleteFails();
- LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key:{}.",
- volumeName, bucketName, keyName, exception);
+ LOG.error("Key delete failed. Volume:{}, Bucket:{}, Key:{}.", volumeName,
+ bucketName, keyName, exception);
break;
default:
LOG.error("Unrecognized Result for OMKeyDeleteRequest: {}",
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
index 2fe0dd1..c224235 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/acl/OMKeyAclRequest.java
@@ -26,12 +26,15 @@ import org.apache.hadoop.ozone.audit.AuditLogger;
import org.apache.hadoop.ozone.om.OMMetadataManager;
import org.apache.hadoop.ozone.om.OzoneManager;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper;
import org.apache.hadoop.ozone.om.request.OMClientRequest;
import org.apache.hadoop.ozone.om.request.util.ObjectParser;
import org.apache.hadoop.ozone.om.response.OMClientResponse;
import org.apache.hadoop.ozone.om.response.key.acl.OMKeyAclResponse;
+import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneObj.ObjectType;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse;
@@ -39,6 +42,8 @@ import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer;
import org.apache.hadoop.ozone.security.acl.OzoneObj;
import org.apache.hadoop.hdds.utils.db.cache.CacheKey;
import org.apache.hadoop.hdds.utils.db.cache.CacheValue;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
import static
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK;
@@ -47,6 +52,8 @@ import static
org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_L
*/
public abstract class OMKeyAclRequest extends OMClientRequest {
+ private static final Logger LOG = LoggerFactory
+ .getLogger(OMKeyAclRequest.class);
public OMKeyAclRequest(OMRequest omRequest) {
super(omRequest);
@@ -146,6 +153,37 @@ public abstract class OMKeyAclRequest extends
OMClientRequest {
*/
abstract String getPath();
+ public BucketLayout getBucketLayout(OzoneManager ozoneManager) {
+ BucketLayout bucketLayout = BucketLayout.LEGACY;
+ OmBucketInfo buckInfo = null;
+ try {
+ ObjectParser objectParser = new ObjectParser(getPath(),
+ OzoneManagerProtocolProtos.OzoneObj.ObjectType.KEY);
+
+ String volume = objectParser.getVolume();
+ String bucket = objectParser.getBucket();
+
+ String buckKey =
+ ozoneManager.getMetadataManager().getBucketKey(volume, bucket);
+
+ try {
+ buckInfo =
+ ozoneManager.getMetadataManager().getBucketTable().get(buckKey);
+ if (buckInfo == null) {
+ LOG.error("Bucket not found: {}/{} ", volume, bucket);
+ return BucketLayout.LEGACY;
+ }
+ bucketLayout = buckInfo.getBucketLayout();
+ } catch (IOException e) {
+ LOG.error("Failed to get bucket for the key: " + buckKey, e);
+ }
+ } catch (OMException ome) {
+ LOG.error("Invalid Path: " + getPath(), ome);
+ // Handle exception
+ }
+ return bucketLayout;
+ }
+
/**
* Get Key object Info from the request.
* @return OzoneObjInfo
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
index 7a25634..3be055e 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerProtocolServerSideTranslatorPB.java
@@ -134,7 +134,8 @@ public class OzoneManagerProtocolServerSideTranslatorPB
implements
} else {
checkLeaderStatus();
try {
- OMClientRequest omClientRequest = createClientRequest(request);
+ OMClientRequest omClientRequest =
+ createClientRequest(request, ozoneManager);
request = omClientRequest.preExecute(ozoneManager);
} catch (IOException ex) {
// As some of the preExecute returns error. So handle here.
@@ -212,12 +213,13 @@ public class OzoneManagerProtocolServerSideTranslatorPB
implements
if (OmUtils.isReadOnly(request)) {
return handler.handleReadRequest(request);
} else {
- OMClientRequest omClientRequest = createClientRequest(request);
+ OMClientRequest omClientRequest =
+ createClientRequest(request, ozoneManager);
request = omClientRequest.preExecute(ozoneManager);
index = transactionIndex.incrementAndGet();
omClientResponse = handler.handleWriteRequest(request, index);
}
- } catch(IOException ex) {
+ } catch (IOException ex) {
// As some of the preExecute returns error. So handle here.
return createErrorResponse(request, ex);
}
diff --git
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
index 0295a5b..61587a3 100644
---
a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
+++
b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/protocolPB/OzoneManagerRequestHandler.java
@@ -240,11 +240,13 @@ public class OzoneManagerRequestHandler implements
RequestHandler {
@Override
public OMClientResponse handleWriteRequest(OMRequest omRequest,
long transactionLogIndex) {
- OMClientRequest omClientRequest =
- OzoneManagerRatisUtils.createClientRequest(omRequest);
- OMClientResponse omClientResponse =
- omClientRequest.validateAndUpdateCache(getOzoneManager(),
- transactionLogIndex, ozoneManagerDoubleBuffer::add);
+ OMClientRequest omClientRequest = null;
+ OMClientResponse omClientResponse = null;
+ omClientRequest =
+ OzoneManagerRatisUtils.createClientRequest(omRequest, impl);
+ omClientResponse = omClientRequest
+ .validateAndUpdateCache(getOzoneManager(), transactionLogIndex,
+ ozoneManagerDoubleBuffer::add);
return omClientResponse;
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
index facb82b..3b9e6a4 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/TestOMRequestUtils.java
@@ -45,6 +45,7 @@ import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs;
import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo;
import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils;
import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos;
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
.MultipartUploadAbortRequest;
@@ -96,9 +97,17 @@ public final class TestOMRequestUtils {
public static void addVolumeAndBucketToDB(String volumeName,
String bucketName, OMMetadataManager omMetadataManager) throws Exception
{
+ addVolumeAndBucketToDB(volumeName, bucketName, omMetadataManager,
+ BucketLayout.DEFAULT);
+ }
+
+ public static void addVolumeAndBucketToDB(String volumeName,
+ String bucketName, OMMetadataManager omMetadataManager,
+ BucketLayout bucketLayout) throws Exception {
+
addVolumeToDB(volumeName, omMetadataManager);
- addBucketToDB(volumeName, bucketName, omMetadataManager);
+ addBucketToDB(volumeName, bucketName, omMetadataManager, bucketLayout);
}
@SuppressWarnings("parameterNumber")
@@ -429,9 +438,18 @@ public final class TestOMRequestUtils {
public static void addBucketToDB(String volumeName, String bucketName,
OMMetadataManager omMetadataManager) throws Exception {
+ addBucketToDB(volumeName, bucketName, omMetadataManager,
+ BucketLayout.DEFAULT);
+ }
+
+ public static void addBucketToDB(String volumeName, String bucketName,
+ OMMetadataManager omMetadataManager, BucketLayout bucketLayout)
+ throws Exception {
+
OmBucketInfo omBucketInfo =
OmBucketInfo.newBuilder().setVolumeName(volumeName)
- .setBucketName(bucketName).setCreationTime(Time.now()).build();
+ .setBucketName(bucketName).setCreationTime(Time.now())
+ .setBucketLayout(bucketLayout).build();
// Add to cache.
omMetadataManager.getBucketTable().addCacheEntry(
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
index b5af354..4d14be8 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequest.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.om.request.key;
import java.util.UUID;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.junit.Assert;
import org.junit.Test;
@@ -39,6 +40,8 @@ import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos
*/
public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
+ private BucketLayout bucketLayout = BucketLayout.DEFAULT;
+
@Test
public void testPreExecute() throws Exception {
doPreExecute(createDeleteKeyRequest());
@@ -48,7 +51,7 @@ public class TestOMKeyDeleteRequest extends TestOMKeyRequest {
public void testValidateAndUpdateCache() throws Exception {
// Add volume, bucket and key entries to OM DB.
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
- omMetadataManager);
+ omMetadataManager, bucketLayout);
String ozoneKey = addKeyToTable();
@@ -108,7 +111,7 @@ public class TestOMKeyDeleteRequest extends
TestOMKeyRequest {
OMClientResponse omClientResponse =
omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
- 100L, ozoneManagerDoubleBufferHelper);
+ 100L, ozoneManagerDoubleBufferHelper, bucketLayout);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND,
omClientResponse.getOMResponse().getStatus());
@@ -126,7 +129,7 @@ public class TestOMKeyDeleteRequest extends
TestOMKeyRequest {
OMClientResponse omClientResponse =
omKeyDeleteRequest.validateAndUpdateCache(ozoneManager,
- 100L, ozoneManagerDoubleBufferHelper);
+ 100L, ozoneManagerDoubleBufferHelper, bucketLayout);
Assert.assertEquals(OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND,
omClientResponse.getOMResponse().getStatus());
@@ -180,4 +183,8 @@ public class TestOMKeyDeleteRequest extends
TestOMKeyRequest {
OMRequest modifiedOmRequest) {
return new OMKeyDeleteRequest(modifiedOmRequest);
}
+
+ protected void setBucketlayout(BucketLayout buckLayout) {
+ this.bucketLayout = buckLayout;
+ }
}
diff --git
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
index 3686b6a..68f7557 100644
---
a/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
+++
b/hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyDeleteRequestWithFSO.java
@@ -22,9 +22,9 @@ import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.ozone.om.OzonePrefixPathImpl;
import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.helpers.BucketLayout;
import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
import org.apache.hadoop.ozone.om.helpers.OzoneFileStatus;
-import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerRatisUtils;
import org.apache.hadoop.ozone.om.request.TestOMRequestUtils;
import
org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest;
import org.apache.hadoop.ozone.security.acl.OzonePrefixPath;
@@ -73,10 +73,8 @@ public class TestOMKeyDeleteRequestWithFSO extends
TestOMKeyDeleteRequest {
@Override
protected OzoneConfiguration getOzoneConfiguration() {
OzoneConfiguration config = super.getOzoneConfiguration();
- // Metadata layout prefix will be set while invoking OzoneManager#start()
- // and its not invoked in this test. Hence it is explicitly setting
- // this configuration to populate prefix tables.
- OzoneManagerRatisUtils.setBucketFSOptimized(true);
+ // Setting explicitly to FSO
+ super.setBucketlayout(BucketLayout.FILE_SYSTEM_OPTIMIZED);
return config;
}
@@ -84,7 +82,7 @@ public class TestOMKeyDeleteRequestWithFSO extends
TestOMKeyDeleteRequest {
public void testOzonePrefixPathViewer() throws Exception {
// Add volume, bucket and key entries to OM DB.
TestOMRequestUtils.addVolumeAndBucketToDB(volumeName, bucketName,
- omMetadataManager);
+ omMetadataManager, BucketLayout.FILE_SYSTEM_OPTIMIZED);
String ozoneKey = addKeyToTable();
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]