[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r504133459 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java ## @@ -466,27 +465,22 @@ public static void addDirectoryTableCacheEntries( * @param trxnLogIndex transaction log index * @return dbOmFileInfo, which keeps leaf node name in keyName field Review comment: Noted. Will take care in next PR. This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r503628506 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; +int numKeysCreated = 0; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, +
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r502734677 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileRequest.java ## @@ -453,4 +456,145 @@ public static void addDirectoryTableCacheEntries( } } + /** + * Adding Key info to the openFile Table cache. + * + * @param omMetadataManager OM Metadata Manager + * @param dbOpenFileNameopen file name key + * @param omFileInfokey info + * @param fileName file name + * @param trxnLogIndex transaction log index + */ + public static void addOpenFileTableCacheEntry( + OMMetadataManager omMetadataManager, String dbOpenFileName, + @Nullable OmKeyInfo omFileInfo, String fileName, long trxnLogIndex) { + +Optional keyInfoOptional = Optional.absent(); +if (omFileInfo != null) { + // New key format for the openFileTable. + // For example, the user given key path is '/a/b/c/d/e/file1', then in DB + // keyName field stores only the leaf node name, which is 'file1'. + OmKeyInfo dbOmFileInfo = omFileInfo.copyObject(); Review comment: OK:-) This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r502734533 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; +int numKeysCreated = 0; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, +
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r502565993 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java ## @@ -227,6 +247,9 @@ protected OmMetadataManagerImpl() { @Override public Table getOpenKeyTable() { +if (enableFSPaths && OzoneManagerRatisUtils.isOmLayoutVersionV1()) { Review comment: Sure, I will add special handling in the KeyCommit code while implementing the KeyCreate request. Hope that makes sense to you. This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r502525148 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java ## @@ -227,6 +247,9 @@ protected OmMetadataManagerImpl() { @Override public Table getOpenKeyTable() { +if (enableFSPaths && OzoneManagerRatisUtils.isOmLayoutVersionV1()) { Review comment: Thanks @linyiqun for the comment. It seems the above comment was not clear. I had done few corrections to the above comment. Yes, V1 represents new key format. Adding background about `ozone.om.enable.filesystem.paths` -> this is the config to enable/disable enableFSPaths feature. Basically here the idea is to provide s3/fs inter-op. Please refer jira https://issues.apache.org/jira/browse/HDDS-4097 for more details. If the flag is enabled, then the user given key will be normalized and stored in FS semantics format by OM and it will be 100% FS semantics. If it is false, the key won't be normalized and it will be 100% S3 semantics. For example, user created a key "/dir1/dir2/dir3/file1" from S3 API. Now, if the flag is enabled the key will be normalized and create intermediate directories for the file1. _**More Details:-**_ The cases I mentioned above - **V1 feature version & enableFSPaths=true** is 100% FS semantics and **V1 feature version & enableFSPaths=false** is 100% S3 semantics Assume the key is /dir1/dir2/dir3/file-1. Again assume V1 feature version enabled and bucketId is 512. Now, **enableFSPaths=true**, which is 100% FS semantics. It stores as "512/dir1:1025", "1025/dir2:1026" and "1026/dir3:1027" into dirTable and "1027:file1" into openFiletable and on close move it to fileTable **enableFSPaths=false**, which is 100% S3 semantics. It stores as "512/dir1/dir2/dir3/file1:1025" into openFileTable and on close move it to fileTable. Here still maintains the parentID/Key format, but the key will be the fullpath and not a normalized path. 
Here the key can be anything like `/dir1dir2dir3///file1`. Please let me know if any more details are needed. This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r502350594 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java ## @@ -227,6 +247,9 @@ protected OmMetadataManagerImpl() { @Override public Table getOpenKeyTable() { +if (enableFSPaths && OzoneManagerRatisUtils.isOmLayoutVersionV1()) { Review comment: Good catch @bharatviswa504. Please feel free to add if anything else is needed. Thanks again! Based on our offline discussions, below is the expected behavior for diff requests: **V1 feature version** : Following ops shouldn't depend on enableFSPaths flag 1) FileCreate -> Look into dirTable for parents. Then create entries in openFileTable and on close add it to fileTable. 2) DirCreate -> Create entries in dirTable 3) File/DirDelete -> Look into fileTable and dirTable for the keys. 4) File/DirRename-> Look into fileTable and dirTable for the keys. **V1 feature version & enableFSPaths=true** 1) KeyCreate ---> Look into dirTable for parents. Create entries in openFileTable and on close add it to fileTable. 2) KeyDelete ---> Look into fileTable and dirTable for the keys. 3) KeyRename -> supported only in ozone shell. It should look into fileTable and dirTable for the keys. **V1 feature version & enableFSPaths=false** 1) KeyCreate ---> Create entries in openFileTable and on close add it to fileTable, but the parentId is the bucketId and the key "dir1/dir2/dir3/file1" will be stored into fileTable like "512/dir1/dir2/dir3/file1". Assume bucketId is 512. 2) KeyDelete ---> Look into fileTable for the keys. 3) KeyRename -> supported only in ozone shell. It should look into fileTable for the keys. In this PR, I will handle only the `FileCreate` request and have not provided checks for enableFSPaths in KeyCommit. I will make these changes in the latest commit. Later, I will raise subsequent jiras for handling KeyCreate/KeyCommit and other ops. 
This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r501645890 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; +int numKeysCreated = 0; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, +
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r502350594 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java ## @@ -227,6 +247,9 @@ protected OmMetadataManagerImpl() { @Override public Table getOpenKeyTable() { +if (enableFSPaths && OzoneManagerRatisUtils.isOmLayoutVersionV1()) { Review comment: Good catch @bharatviswa504. Please feel free to add if anything else needed. Thanks again! Based on our offline discussions, below is the expected behavior for diff requests: **V1 feature version** : Following ops shouldn't depend on enableFSPaths flag 1) FileCreate -> Look into dirTable for parents. Then create entries in openFileTable and on close add it to fileTable. 2) DirCreate -> Create entries in dirTable 3) File/DirDelete -> Look into fileTable and dirTable for the keys. 4) File/DirRename-> Look into fileTable and dirTable for the keys. **V1 feature version & enableFSPaths=true** 1) KeyCreate ---> Look into dirTable for parents. Create entries in openFileTable and on close add it to fileTable. 2) KeyDelete ---> Look into fileTable and dirTable for the keys. 3) KeyRename -> supported only in ozone shell. It should look into fileTable and dirTable for the keys. **V1 feature version & enableFSPaths=false** 1) KeyCreate ---> Create entries in openKeyTable and on close add it to keyTable. 2) KeyDelete ---> Look into keyTable for the keys. 3) KeyRename -> supported only in ozone shell. It should look into keyTable for the keys. In this PR, will handle only `FileCreate` request and not provided checks for enableFSPaths in KeyCommit. Will do this changes in latest commit. Later, I will raise subsequent jiras for handling KeyCreate/KeyCommit and other ops. This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r501645890 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; +int numKeysCreated = 0; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, +
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r501436585 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; +int numKeysCreated = 0; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, +
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r501423744 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java ## @@ -0,0 +1,275 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.key; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static 
org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.NOT_A_FILE; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles CommitKey request layout version V1. + */ +public class OMKeyCommitRequestV1 extends OMKeyCommitRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMKeyCommitRequestV1.class); + + public OMKeyCommitRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest(); + +KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs(); + +String volumeName = commitKeyArgs.getVolumeName(); +String bucketName = commitKeyArgs.getBucketName(); +String keyName = commitKeyArgs.getKeyName(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumKeyCommits(); + +AuditLogger auditLogger = ozoneManager.getAuditLogger(); + +Map auditMap = buildKeyArgsAuditMap(commitKeyArgs); + +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); + +IOException exception = null; +OmKeyInfo omKeyInfo = null; +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +OMClientResponse omClientResponse = null; +boolean bucketLockAcquired = false; +Result result; + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +try { + commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap); + volumeName = commitKeyArgs.getVolumeName(); + bucketName = commitKeyArgs.getBucketName(); + + // check Acl + checkKeyAc
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r501215658 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; +int numKeysCreated = 0; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, +
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r501207900 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; +int numKeysCreated = 0; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, +
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r501204848 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; +int numKeysCreated = 0; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, +
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r501022137 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequest.java ## @@ -253,4 +253,33 @@ public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, return omClientResponse; } + + protected void processResult(CommitKeyRequest commitKeyRequest, Review comment: oops, thanks a lot @linyiqun for the detailed code reviews. I have fixed this in my latest commit. This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500920112 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java ## @@ -129,6 +132,10 @@ * |--| * | directoryTable| parentId/directoryName -> DirectoryInfo | * |--| + * | fileTable | parentId/fileName/id -> FileInfo| + * |--| + * | openFileTable | parentId/fileName -> FileInfo | Review comment: Thanks again. Done in latest commit ## File path: hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequest.java ## @@ -146,7 +141,7 @@ public void testValidateAndUpdateCache() throws Exception { // Check open table whether key is added or not. -omKeyInfo = omMetadataManager.getOpenKeyTable().get(openKey); +omKeyInfo = verifyPathInOpenKeyTable(keyName, id, true); Assert.assertNotNull(omKeyInfo); Review comment: Done in latest commit This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500919915 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java ## @@ -0,0 +1,260 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.key; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles CommitKey request. 
+ */ +public class OMKeyCommitRequestV1 extends OMKeyCommitRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMKeyCommitRequestV1.class); + + public OMKeyCommitRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest(); + +KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs(); + +String volumeName = commitKeyArgs.getVolumeName(); +String bucketName = commitKeyArgs.getBucketName(); +String keyName = commitKeyArgs.getKeyName(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumKeyCommits(); + +AuditLogger auditLogger = ozoneManager.getAuditLogger(); + +Map auditMap = buildKeyArgsAuditMap(commitKeyArgs); + +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); + +IOException exception = null; +OmKeyInfo omKeyInfo = null; +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +OMClientResponse omClientResponse = null; +boolean bucketLockAcquired = false; +Result result; + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +try { + commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap); + volumeName = commitKeyArgs.getVolumeName(); + bucketName = commitKeyArgs.getBucketName(); + + // check Acl + checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, + keyName, IAccessAuthorizer.ACLType.WRITE, + commitKeyRequest.getClientID()); + + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + Iterator pathComponents = Paths.get(keyName).iterator(); + String dbOpenFileKey = null; + + List locationInfoList = new ArrayList<>(); + for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) { 
+locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation)); + } + + bucketLockAcquired = + omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + volumeName, buck
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500919741 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java ## @@ -0,0 +1,260 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.key; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles CommitKey request. 
+ */ +public class OMKeyCommitRequestV1 extends OMKeyCommitRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMKeyCommitRequestV1.class); + + public OMKeyCommitRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest(); + +KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs(); + +String volumeName = commitKeyArgs.getVolumeName(); +String bucketName = commitKeyArgs.getBucketName(); +String keyName = commitKeyArgs.getKeyName(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumKeyCommits(); + +AuditLogger auditLogger = ozoneManager.getAuditLogger(); + +Map auditMap = buildKeyArgsAuditMap(commitKeyArgs); + +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); + +IOException exception = null; +OmKeyInfo omKeyInfo = null; +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +OMClientResponse omClientResponse = null; +boolean bucketLockAcquired = false; +Result result; + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +try { + commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap); + volumeName = commitKeyArgs.getVolumeName(); + bucketName = commitKeyArgs.getBucketName(); + + // check Acl + checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, + keyName, IAccessAuthorizer.ACLType.WRITE, + commitKeyRequest.getClientID()); + + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + Iterator pathComponents = Paths.get(keyName).iterator(); + String dbOpenFileKey = null; + + List locationInfoList = new ArrayList<>(); + for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) { 
+locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation)); + } + + bucketLockAcquired = + omMetadataManager.getLock().acquireLock(BUCKET_LOCK, Review comment: Good one. Done
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500919600 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/key/OMKeyCommitRequestV1.java ## @@ -0,0 +1,260 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.key; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.AuditLogger; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponse; +import org.apache.hadoop.ozone.om.response.key.OMKeyCommitResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Path; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.Iterator; +import java.util.List; +import java.util.Map; + +import static org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes.KEY_NOT_FOUND; +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; + +/** + * Handles CommitKey request. 
+ */ +public class OMKeyCommitRequestV1 extends OMKeyCommitRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMKeyCommitRequestV1.class); + + public OMKeyCommitRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CommitKeyRequest commitKeyRequest = getOmRequest().getCommitKeyRequest(); + +KeyArgs commitKeyArgs = commitKeyRequest.getKeyArgs(); + +String volumeName = commitKeyArgs.getVolumeName(); +String bucketName = commitKeyArgs.getBucketName(); +String keyName = commitKeyArgs.getKeyName(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumKeyCommits(); + +AuditLogger auditLogger = ozoneManager.getAuditLogger(); + +Map auditMap = buildKeyArgsAuditMap(commitKeyArgs); + +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); + +IOException exception = null; +OmKeyInfo omKeyInfo = null; +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +OMClientResponse omClientResponse = null; +boolean bucketLockAcquired = false; +Result result; + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +try { + commitKeyArgs = resolveBucketLink(ozoneManager, commitKeyArgs, auditMap); + volumeName = commitKeyArgs.getVolumeName(); + bucketName = commitKeyArgs.getBucketName(); + + // check Acl + checkKeyAclsInOpenKeyTable(ozoneManager, volumeName, bucketName, + keyName, IAccessAuthorizer.ACLType.WRITE, + commitKeyRequest.getClientID()); + + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + Iterator pathComponents = Paths.get(keyName).iterator(); + String dbOpenFileKey = null; + + List locationInfoList = new ArrayList<>(); + for (KeyLocation keyLocation : commitKeyArgs.getKeyLocationsList()) { 
+locationInfoList.add(OmKeyLocationInfo.getFromProtobuf(keyLocation)); + } + + bucketLockAcquired = + omMetadataManager.getLock().acquireLock(BUCKET_LOCK, + volumeName, buck
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500919334 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; +int numKeysCreated = 0; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, +
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500914750 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java ## @@ -227,6 +247,9 @@ protected OmMetadataManagerImpl() { @Override public Table getOpenKeyTable() { +if (enableFSPaths && OzoneManagerRatisUtils.isOmLayoutVersionV1()) { Review comment: The idea here is that we will use these tables only if `enableFSPaths` is true and the layout is the `V1` version. These new tables will contain only the metadata for new-format keys, for better debugging/maintenance. All keys in the old format will go to the existing tables. This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500914750 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java ## @@ -227,6 +247,9 @@ protected OmMetadataManagerImpl() { @Override public Table getOpenKeyTable() { +if (enableFSPaths && OzoneManagerRatisUtils.isOmLayoutVersionV1()) { Review comment: The idea here is, will use the new tables only if enableFSPaths is true and with V1 version. This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500913715 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,289 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; +int numKeysCreated = 0; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, +
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500911479 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,283 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import com.google.common.base.Optional; +import org.apache.hadoop.hdds.utils.db.cache.CacheKey; +import org.apache.hadoop.hdds.utils.db.cache.CacheValue; +import org.apache.hadoop.ozone.audit.OMAction; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.OMMetrics; +import org.apache.hadoop.ozone.om.OzoneManager; +import org.apache.hadoop.ozone.om.exceptions.OMException; +import org.apache.hadoop.ozone.om.helpers.*; +import org.apache.hadoop.ozone.om.ratis.utils.OzoneManagerDoubleBufferHelper; +import org.apache.hadoop.ozone.om.request.util.OmResponseUtil; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponse; +import org.apache.hadoop.ozone.om.response.file.OMFileCreateResponseV1; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.*; +import org.apache.hadoop.ozone.security.acl.IAccessAuthorizer; +import org.apache.hadoop.ozone.security.acl.OzoneObj; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import java.io.IOException; +import java.nio.file.Paths; +import java.util.ArrayList; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; + +import static org.apache.hadoop.ozone.om.lock.OzoneManagerLock.Resource.BUCKET_LOCK; +import static org.apache.hadoop.ozone.om.request.file.OMFileRequest.OMDirectoryResult.*; + +/** + * Handles create file request layout version1. 
+ */ +public class OMFileCreateRequestV1 extends OMFileCreateRequest { + + private static final Logger LOG = + LoggerFactory.getLogger(OMFileCreateRequestV1.class); + public OMFileCreateRequestV1(OMRequest omRequest) { +super(omRequest); + } + + @Override + @SuppressWarnings("methodlength") + public OMClientResponse validateAndUpdateCache(OzoneManager ozoneManager, + long trxnLogIndex, OzoneManagerDoubleBufferHelper omDoubleBufferHelper) { + +CreateFileRequest createFileRequest = getOmRequest().getCreateFileRequest(); +KeyArgs keyArgs = createFileRequest.getKeyArgs(); +Map auditMap = buildKeyArgsAuditMap(keyArgs); + +String volumeName = keyArgs.getVolumeName(); +String bucketName = keyArgs.getBucketName(); +String keyName = keyArgs.getKeyName(); + +// if isRecursive is true, file would be created even if parent +// directories does not exist. +boolean isRecursive = createFileRequest.getIsRecursive(); +if (LOG.isDebugEnabled()) { + LOG.debug("File create for : " + volumeName + "/" + bucketName + "/" + + keyName + ":" + isRecursive); +} + +// if isOverWrite is true, file would be over written. +boolean isOverWrite = createFileRequest.getIsOverwrite(); + +OMMetrics omMetrics = ozoneManager.getMetrics(); +omMetrics.incNumCreateFile(); + +OMMetadataManager omMetadataManager = ozoneManager.getMetadataManager(); + +boolean acquiredLock = false; + +OmVolumeArgs omVolumeArgs = null; +OmBucketInfo omBucketInfo = null; +final List locations = new ArrayList<>(); +List missingParentInfos; + +OMClientResponse omClientResponse = null; +OMResponse.Builder omResponse = OmResponseUtil.getOMResponseBuilder( +getOmRequest()); +IOException exception = null; +Result result = null; +try { + keyArgs = resolveBucketLink(ozoneManager, keyArgs, auditMap); + volumeName = keyArgs.getVolumeName(); + bucketName = keyArgs.getBucketName(); + + if (keyName.length() == 0) { +// Check if this is the root of the filesystem. 
+throw new OMException("Can not write to directory: " + keyName, +OMException.ResultCodes.NOT_A_FILE); + } + + // check Acl + checkKeyAcls(ozoneManager, volumeName, bucketName, keyName, + IAccessAuthorizer.ACLType
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500911297 ## File path: hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java ## @@ -126,6 +144,23 @@ public void setDataSize(long size) { this.dataSize = size; } + public void setFileName(String fileName) { +this.fileName = fileName; + } + + public String getFileName() { +return fileName; + } + + public void setParentObjectID(long parentObjectID) { +this.parentObjectID = parentObjectID; + } + + public long getParentObjectID() { +return parentObjectID; + } + Review comment: Done! This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500910910 ## File path: hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/file/TestOMFileCreateRequestV1.java ## @@ -0,0 +1,456 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.request.file; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.request.key.TestOMKeyRequestV1; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CreateFileRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.util.StringUtils; +import org.apache.hadoop.util.Time; +import org.junit.Assert; +import org.junit.Test; + +import java.util.List; +import java.util.UUID; + +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.VOLUME_NOT_FOUND; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.BUCKET_NOT_FOUND; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.FILE_ALREADY_EXISTS; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.NOT_A_FILE; +import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Status.DIRECTORY_NOT_FOUND; + +/** + * Tests OMFileCreateRequest V1 layout version. + */ +public class TestOMFileCreateRequestV1 extends TestOMKeyRequestV1 { Review comment: Great idea. Done in the latest commit. ## File path: hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/request/key/TestOMKeyCommitRequestV1.java ## @@ -0,0 +1,379 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + +package org.apache.hadoop.ozone.om.request.key; + +import org.apache.hadoop.hdds.protocol.proto.HddsProtos; +import org.apache.hadoop.ozone.OzoneConsts; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo; +import org.apache.hadoop.ozone.om.helpers.OzoneFSUtils; +import org.apache.hadoop.ozone.om.request.TestOMRequestUtils; +import org.apache.hadoop.ozone.om.response.OMClientResponse; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.CommitKeyRequest; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.KeyLocation; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMRequest; +import org.apache.hadoop.util.Time; +import org.junit.Assert; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import java.util.stream.Collectors; + +/** + * Class tes
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500909870 ## File path: hadoop-ozone/interface-storage/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java ## @@ -399,4 +399,15 @@ String getMultipartKey(String volume, String bucket, String key, String * @return DB directory key as String. */ String getOzonePathKey(long parentObjectId, String pathComponentName); Review comment: Done! ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java ## @@ -129,6 +132,8 @@ * |--| * | directoryTable| parentId/directoryName -> DirectoryInfo | * |--| + * | fileTable | parentId/fileName -> FileInfo | + * |--| Review comment: Done! ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java ## @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.ozone.om.response.file; + +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * Response for create file request layout version1. + */ +public class OMFileCreateResponseV1 extends OMFileCreateResponse { + + private List parentDirInfos; + + public OMFileCreateResponseV1(@Nonnull OMResponse omResponse, +@Nonnull OmKeyInfo omKeyInfo, +@Nonnull List parentDirInfos, +long openKeySessionID, +@Nonnull OmVolumeArgs omVolumeArgs, +@Nonnull OmBucketInfo omBucketInfo) { +super(omResponse, omKeyInfo, new ArrayList<>(), openKeySessionID, +omVolumeArgs, omBucketInfo); +this.parentDirInfos = parentDirInfos; + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataMgr, + BatchOperation batchOp) throws IOException { + +/** + * Create parent directory entries during Key Create - do not wait + * for Key Commit request. + * XXX handle stale directory entries. + */ +if (parentDirInfos != null) { + for (OmDirectoryInfo parentKeyInfo : parentDirInfos) { +String parentKey = omMetadataMgr.getOzonePathKey( +parentKeyInfo.getParentObjectID(), parentKeyInfo.getName()); Review comment: Done! 
## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java ## @@ -346,6 +369,7 @@ protected static DBStoreBuilder addOMTablesAndCodecs(DBStoreBuilder builder) { .addCodec(S3SecretValue.class, new S3SecretValueCodec()) .addCodec(OmPrefixInfo.class, new OmPrefixInfoCodec()) .addCodec(OmDirectoryInfo.class, new OmDirectoryInfoCodec()) + Review comment: Done! ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/file/OMFileCreateRequestV1.java ## @@ -0,0 +1,283 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obta
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500910318 ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java ## @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.file; + +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OMResponse; + +import javax.annotation.Nonnull; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +/** + * Response for create file request layout version1. 
+ */ +public class OMFileCreateResponseV1 extends OMFileCreateResponse { + + private List parentDirInfos; + + public OMFileCreateResponseV1(@Nonnull OMResponse omResponse, +@Nonnull OmKeyInfo omKeyInfo, +@Nonnull List parentDirInfos, +long openKeySessionID, +@Nonnull OmVolumeArgs omVolumeArgs, +@Nonnull OmBucketInfo omBucketInfo) { +super(omResponse, omKeyInfo, new ArrayList<>(), openKeySessionID, +omVolumeArgs, omBucketInfo); +this.parentDirInfos = parentDirInfos; + } + + @Override + protected void addToDBBatch(OMMetadataManager omMetadataMgr, + BatchOperation batchOp) throws IOException { + +/** + * Create parent directory entries during Key Create - do not wait + * for Key Commit request. + * XXX handle stale directory entries. + */ +if (parentDirInfos != null) { + for (OmDirectoryInfo parentKeyInfo : parentDirInfos) { +String parentKey = omMetadataMgr.getOzonePathKey( +parentKeyInfo.getParentObjectID(), parentKeyInfo.getName()); +if (LOG.isDebugEnabled()) { + LOG.debug("putWithBatch adding parent : key {} info : {}", parentKey, + parentKeyInfo); +} +omMetadataMgr.getDirectoryTable().putWithBatch(batchOp, parentKey, +parentKeyInfo); + } +} + +String openKey = omMetadataMgr.getOpenFileName( +getOmKeyInfo().getParentObjectID(), getOmKeyInfo().getFileName(), +getOpenKeySessionID()); +omMetadataMgr.getOpenKeyTable().putWithBatch(batchOp, openKey, +getOmKeyInfo()); Review comment: Done! ## File path: hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/response/file/OMFileCreateResponseV1.java ## @@ -0,0 +1,81 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.file; + +import org.apache.hadoop.hdds.utils.db.BatchOperation; +import org.apache.hadoop.ozone.om.OMMetadataManager; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmDirectoryInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmVolumeArgs; +import org.apache.hadoop.ozone.protocol.prot
[GitHub] [hadoop-ozone] rakeshadr commented on a change in pull request #1473: HDDS-4266: CreateFile : store parent dir entries into DirTable and file entry into separate FileTable
rakeshadr commented on a change in pull request #1473: URL: https://github.com/apache/hadoop-ozone/pull/1473#discussion_r500470079 ## File path: hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/helpers/OmKeyInfo.java ## @@ -413,7 +461,8 @@ public KeyInfo getProtobuf(boolean ignorePipeline) { .addAllMetadata(KeyValueUtil.toProtobuf(metadata)) .addAllAcls(OzoneAclUtil.toProtobuf(acls)) .setObjectID(objectID) -.setUpdateID(updateID); +.setUpdateID(updateID) +.setParentID(parentObjectID); Review comment: I am not persisting fileName, which is already the last name in the path component(keyName). I have added logic to prepare filename from Keyname. Hope this is fine? This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org - To unsubscribe, e-mail: ozone-issues-unsubscr...@hadoop.apache.org For additional commands, e-mail: ozone-issues-h...@hadoop.apache.org