ivandika3 commented on code in PR #9206: URL: https://github.com/apache/ozone/pull/9206#discussion_r2514587332
########## hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCompleteResponse.java: ########## @@ -0,0 +1,438 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.multipart; + +import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor.ONE; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.UUID; +import org.apache.hadoop.hdds.client.RatisReplicationConfig; +import org.apache.hadoop.hdds.client.ReplicationConfig; +import org.apache.hadoop.ozone.om.helpers.OmBucketInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup; +import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; +import org.apache.hadoop.util.Time; +import org.junit.jupiter.api.Test; + +/** + * Test multipart upload complete response for non-FSO (default) buckets. 
+ */ +public class TestS3MultipartUploadCompleteResponse + extends TestS3MultipartResponse { + + @Test + public void testAddDBToBatch() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = getKeyName(); + String multipartUploadID = UUID.randomUUID().toString(); + + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + long txnId = 50; + long objectId = getObjectId(); + String dbMultipartKey = omMetadataManager.getMultipartKey(volumeName, + bucketName, keyName, multipartUploadID); + String dbMultipartOpenKey = getMultipartOpenKey(volumeName, bucketName, keyName, multipartUploadID); + long clientId = Time.now(); + + // add MPU entry to OpenKeyTable + S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse = + getS3InitiateMPUResponse(volumeName, bucketName, keyName, + multipartUploadID); + + s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager, + batchOperation); + + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + String dbOpenKey = getOpenKey(volumeName, bucketName, + keyName, clientId); + String dbKey = getDBKey(volumeName, bucketName, keyName); + OmKeyInfo omKeyInfo = getOmKeyInfo(txnId, objectId, volumeName, bucketName, + keyName, RatisReplicationConfig.getInstance(ONE), + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)); + + // add key to openFileTable + omKeyInfo.setKeyName(keyName); + addFileToKeyTable(keyName, omKeyInfo, clientId); + + addS3MultipartUploadCommitPartResponse(volumeName, bucketName, keyName, + multipartUploadID, dbOpenKey); + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo omBucketInfo = + omMetadataManager.getBucketTable().get(bucketKey); + + assertNotNull(omMetadataManager.getMultipartInfoTable().get(dbMultipartKey)); + assertNotNull(omMetadataManager.getOpenKeyTable(getBucketLayout()).get(dbMultipartOpenKey)); + + List<OmKeyInfo> unUsedParts = new ArrayList<>(); + S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse = + getS3CompleteMPUResponse(volumeName, bucketName, keyName, multipartUploadID, + omKeyInfo, OzoneManagerProtocolProtos.Status.OK, unUsedParts, omBucketInfo); + + s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager, + batchOperation); + + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + assertNotNull(omMetadataManager.getKeyTable(getBucketLayout()).get(dbKey)); + assertNull(omMetadataManager.getMultipartInfoTable().get(dbMultipartKey)); + assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()) + .get(dbMultipartOpenKey)); + + // As no parts are created, so no entries should be there in delete table. 
+ assertEquals(0, omMetadataManager.countRowsInTable( + omMetadataManager.getDeletedTable())); + } + + @Test + // similar to testAddDBToBatch(), but omBucketInfo is null + public void testAddDBToBatchWithNullBucketInfo() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = getKeyName(); + String multipartUploadID = UUID.randomUUID().toString(); + + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + long txnId = 150; + long objectId = getObjectId(); + String dbMultipartKey = omMetadataManager.getMultipartKey(volumeName, + bucketName, keyName, multipartUploadID); + String dbMultipartOpenKey = getMultipartOpenKey(volumeName, bucketName, keyName, multipartUploadID); + long clientId = Time.now(); + + // add MPU entry to OpenKeyTable + S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse = + getS3InitiateMPUResponse(volumeName, bucketName, keyName, + multipartUploadID); + + s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager, + batchOperation); + + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + String dbOpenKey = getOpenKey(volumeName, bucketName, + keyName, clientId); + String dbKey = getDBKey(volumeName, bucketName, keyName); + OmKeyInfo omKeyInfo = getOmKeyInfo(txnId, objectId, volumeName, bucketName, + keyName, RatisReplicationConfig.getInstance(ONE), + new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)); + + // add key to openFileTable + omKeyInfo.setKeyName(keyName); + addFileToKeyTable(keyName, omKeyInfo, clientId); + + addS3MultipartUploadCommitPartResponse(volumeName, bucketName, keyName, + multipartUploadID, dbOpenKey); + + assertNotNull( + omMetadataManager.getMultipartInfoTable().get(dbMultipartKey)); + assertNotNull(omMetadataManager.getOpenKeyTable(getBucketLayout()).get(dbMultipartOpenKey)); + + // S3MultipartUploadCompleteResponse should accept null bucketInfo + List<OmKeyInfo> unUsedParts = new ArrayList<>(); + S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse = + getS3CompleteMPUResponse(volumeName, bucketName, keyName, + multipartUploadID, omKeyInfo, + OzoneManagerProtocolProtos.Status.OK, unUsedParts, + null); + + s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager, + batchOperation); + + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + assertNotNull(omMetadataManager.getKeyTable(getBucketLayout()).get(dbKey)); + assertNull(omMetadataManager.getMultipartInfoTable().get(dbMultipartKey)); + assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()) + .get(dbMultipartOpenKey)); + + // As no parts are created, so no entries should be there in delete table. + assertEquals(0, omMetadataManager.countRowsInTable( + omMetadataManager.getDeletedTable())); + } + + @Test + public void testAddDBToBatchWithParts() throws Exception { + + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = getKeyName(); + + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + createParentPath(volumeName, bucketName); + runAddDBToBatchWithParts(volumeName, bucketName, keyName, 0); + + // As 1 unused parts exists, so 1 unused entry should be there in delete + // table. 
+ assertEquals(2, omMetadataManager.countRowsInTable( + omMetadataManager.getDeletedTable())); + } + + @Test + public void testAddDBToBatchWithPartsWithKeyInDeleteTable() throws Exception { + + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = getKeyName(); + + OmBucketInfo bucketInfo = OMRequestTestUtils.addVolumeAndBucketToDB( + volumeName, bucketName, omMetadataManager); + createParentPath(volumeName, bucketName); + + // Put an entry to delete table with the same key prior to multipart commit + OmKeyInfo prevKey = getOmKeyInfo(8, getObjectId(), volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), new OmKeyLocationInfoGroup(0L, new ArrayList<>(), true)); + RepeatedOmKeyInfo prevKeys = new RepeatedOmKeyInfo(prevKey, bucketInfo.getObjectID()); + String ozoneKey = omMetadataManager + .getOzoneKey(prevKey.getVolumeName(), prevKey.getBucketName(), prevKey.getKeyName()); + omMetadataManager.getDeletedTable().put(ozoneKey, prevKeys); + + long oId = runAddDBToBatchWithParts(volumeName, bucketName, keyName, 1); + + // Make sure new object isn't in delete table + RepeatedOmKeyInfo ds = omMetadataManager.getDeletedTable().get(ozoneKey); + for (OmKeyInfo omKeyInfo : ds.getOmKeyInfoList()) { + assertNotEquals(oId, omKeyInfo.getObjectID()); + } + + // As 1 unused parts and 1 previously put-and-deleted object exist, + // so 2 entries should be there in delete table. + assertEquals(3, omMetadataManager.countRowsInTable( + omMetadataManager.getDeletedTable())); + } + + protected long runAddDBToBatchWithParts(String volumeName, + String bucketName, String keyName, int deleteEntryCount) + throws Exception { + + String multipartUploadID = UUID.randomUUID().toString(); + String dbMultipartKey = omMetadataManager.getMultipartKey(volumeName, + bucketName, keyName, multipartUploadID); + String dbMultipartOpenKey = getMultipartOpenKey(volumeName, bucketName, keyName, multipartUploadID); + + S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse = + getS3InitiateMPUResponse(volumeName, bucketName, keyName, + multipartUploadID); + s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager, batchOperation); + + // Add some dummy parts for testing. + // Not added any key locations, as this just test is to see entries are + // adding to delete table or not. + OmMultipartKeyInfo omMultipartKeyInfo = + s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(); + + // After commits, it adds an entry to the deleted table. Incrementing the + // variable before the method call, because this method also has entry + // count check inside. 
+ deleteEntryCount++; + OmKeyInfo omKeyInfo = commitS3MultipartUpload(volumeName, bucketName, + keyName, multipartUploadID, dbMultipartKey, + omMultipartKeyInfo, deleteEntryCount); + + String bucketKey = omMetadataManager.getBucketKey(volumeName, bucketName); + OmBucketInfo omBucketInfo = omMetadataManager.getBucketTable().get(bucketKey); + + OmKeyInfo omKeyInfoToRemove = getOmKeyInfo(100, getObjectId() + 8, volumeName, bucketName, keyName, + RatisReplicationConfig.getInstance(ONE), + new OmKeyLocationInfoGroup(0L, new ArrayList<>())); + + List<OmKeyInfo> unUsedParts = new ArrayList<>(); + unUsedParts.add(omKeyInfoToRemove); + S3MultipartUploadCompleteResponse s3MultipartUploadCompleteResponse = getS3CompleteMPUResponse( + volumeName, bucketName, keyName, + multipartUploadID, omKeyInfo, OzoneManagerProtocolProtos.Status.OK, + unUsedParts, omBucketInfo + ); + + s3MultipartUploadCompleteResponse.addToDBBatch(omMetadataManager, + batchOperation); + + omMetadataManager.getStore().commitBatchOperation(batchOperation); + String dbKey = getDBKey(volumeName, bucketName, omKeyInfo.getKeyName()); + assertNotNull( + omMetadataManager.getKeyTable(getBucketLayout()).get(dbKey)); + assertNull( + omMetadataManager.getMultipartInfoTable().get(dbMultipartKey)); + assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()) + .get(dbMultipartOpenKey)); + + return omKeyInfo.getObjectID(); + } + + @SuppressWarnings("parameterNumber") + private OmKeyInfo commitS3MultipartUpload(String volumeName, + String bucketName, String keyName, String multipartUploadID, + String multipartKey, + OmMultipartKeyInfo omMultipartKeyInfo, + int deleteEntryCount) throws IOException { + + PartKeyInfo part1 = createPartKeyInfo(volumeName, bucketName, keyName, 1); + + addPart(1, part1, omMultipartKeyInfo); + + long clientId = Time.now(); + String openKey = getOpenKey(volumeName, bucketName, keyName, clientId); + + S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse = + getS3CommitMPUResponse(volumeName, bucketName, + keyName, multipartUploadID, + omMultipartKeyInfo.getPartKeyInfo(1), + omMultipartKeyInfo, + OzoneManagerProtocolProtos.Status.OK, openKey); + + s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager, + batchOperation); + + assertNull( + omMetadataManager.getOpenKeyTable(getBucketLayout()).get(multipartKey)); + assertNull( + omMetadataManager.getMultipartInfoTable().get(multipartKey)); + + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + // As 1 parts are created, so 1 entry should be there in delete table. 
+ assertEquals(deleteEntryCount, + omMetadataManager.countRowsInTable( + omMetadataManager.getDeletedTable())); + + String part1DeletedKeyName = omMetadataManager.getOzoneDeletePathKey( + omMultipartKeyInfo.getPartKeyInfo(1).getPartKeyInfo().getObjectID(), + multipartKey); + + assertNotNull(omMetadataManager.getDeletedTable().get( + part1DeletedKeyName)); + + RepeatedOmKeyInfo ro = + omMetadataManager.getDeletedTable().get(part1DeletedKeyName); + OmKeyInfo omPartKeyInfo = OmKeyInfo.getFromProtobuf(part1.getPartKeyInfo()); + assertEquals(omPartKeyInfo, ro.getOmKeyInfoList().get(0)); + + + return omPartKeyInfo; + } + + protected String getKeyName() { + return UUID.randomUUID().toString(); + } + + protected void createParentPath(String volumeName, String bucketName) + throws Exception { + // no parent hierarchy + } + + protected String getOpenKey(String volumeName, String bucketName, String keyName, long clientId) + throws IOException { + return omMetadataManager.getOpenKey(volumeName, bucketName, keyName, clientId); + } + + protected String getMultipartOpenKey(String volumeName, String bucketName, + String keyName, String multipartUploadID) throws IOException { + return omMetadataManager + .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID); + } Review Comment: I think we should use `OmMultipartUploadUtils#getMultipartOpenKey` since there are differences between FSO and non-FSO multipartKey. ########## hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/response/s3/multipart/TestS3MultipartUploadCommitPartResponse.java: ########## @@ -0,0 +1,250 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.ozone.om.response.s3.multipart; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.junit.jupiter.api.Assertions.assertEquals; +import static org.junit.jupiter.api.Assertions.assertNotNull; +import static org.junit.jupiter.api.Assertions.assertNull; + +import java.io.IOException; +import java.util.List; +import java.util.UUID; +import org.apache.hadoop.hdds.utils.db.Table; +import org.apache.hadoop.ozone.om.helpers.OmKeyInfo; +import org.apache.hadoop.ozone.om.helpers.OmMultipartKeyInfo; +import org.apache.hadoop.ozone.om.helpers.RepeatedOmKeyInfo; +import org.apache.hadoop.ozone.om.request.OMRequestTestUtils; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos; +import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.PartKeyInfo; +import org.apache.hadoop.util.Time; +import org.junit.jupiter.api.Test; + +/** + * Test multipart upload commit part response for non-FSO (default) buckets. 
+ */ +public class TestS3MultipartUploadCommitPartResponse + extends TestS3MultipartResponse { + + @Test + public void testAddDBToBatch() throws Exception { + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = getKeyName(); + String multipartUploadID = UUID.randomUUID().toString(); + + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + + createParentPath(volumeName, bucketName); + String multipartKey = omMetadataManager + .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID); + long clientId = Time.now(); + String openKey = getOpenKey(volumeName, bucketName, keyName, clientId); + + S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse = + getS3CommitMPUResponse(volumeName, bucketName, keyName, + multipartUploadID, null, null, + OzoneManagerProtocolProtos.Status.OK, openKey); + + s3MultipartUploadCommitPartResponse.addToDBBatch(omMetadataManager, + batchOperation); + + omMetadataManager.getStore().commitBatchOperation(batchOperation); + + assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()).get(openKey)); + assertNotNull(omMetadataManager.getMultipartInfoTable().get(multipartKey)); + + // As no parts are created, so no entries should be there in delete table. + assertEquals(0, omMetadataManager.countRowsInTable( + omMetadataManager.getDeletedTable())); + } + + @Test + public void testAddDBToBatchWithParts() throws Exception { + + String volumeName = UUID.randomUUID().toString(); + String bucketName = UUID.randomUUID().toString(); + String keyName = getKeyName(); + String multipartUploadID = UUID.randomUUID().toString(); + + OMRequestTestUtils.addVolumeAndBucketToDB(volumeName, bucketName, + omMetadataManager); + createParentPath(volumeName, bucketName); + + String multipartKey = omMetadataManager + .getMultipartKey(volumeName, bucketName, keyName, multipartUploadID); + + S3InitiateMultipartUploadResponse s3InitiateMultipartUploadResponse = + getS3InitiateMPUResponse(volumeName, bucketName, keyName, + multipartUploadID); + + s3InitiateMultipartUploadResponse.addToDBBatch(omMetadataManager, + batchOperation); + + // Add some dummy parts for testing. + // Not added any key locations, as this just test is to see entries are + // adding to delete table or not. + OmMultipartKeyInfo omMultipartKeyInfo = + s3InitiateMultipartUploadResponse.getOmMultipartKeyInfo(); + + PartKeyInfo part1 = createPartKeyInfo(volumeName, bucketName, keyName, 1); + + addPart(1, part1, omMultipartKeyInfo); + + long clientId = Time.now(); + + String openKey = getOpenKey(volumeName, bucketName, keyName, clientId); + + S3MultipartUploadCommitPartResponse s3MultipartUploadCommitPartResponse = + getS3CommitMPUResponse(volumeName, bucketName, keyName, + multipartUploadID, + omMultipartKeyInfo.getPartKeyInfo(1), + omMultipartKeyInfo, + OzoneManagerProtocolProtos.Status.OK, openKey); + + s3MultipartUploadCommitPartResponse.checkAndUpdateDB(omMetadataManager, + batchOperation); + + assertNull( + omMetadataManager.getOpenKeyTable(getBucketLayout()).get(openKey)); + assertNull( + omMetadataManager.getMultipartInfoTable().get(multipartKey)); Review Comment: Maybe I missed something, but this seems odd. `S3MultipartUploadCommitPartResponse` should persist to `multipartInfoTable`, so it should not be null. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
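For reference, two hedged sketches of what the review comments above suggest; neither is code taken from the PR itself.

On the first comment, the `getMultipartOpenKey` helper in TestS3MultipartUploadCompleteResponse.java could delegate to `OmMultipartUploadUtils#getMultipartOpenKey` so that FSO and non-FSO layouts each build their own multipart open key format. A minimal sketch, assuming the utility accepts the volume, bucket, key, upload id, metadata manager, and bucket layout; verify the actual signature before adopting it:

```java
protected String getMultipartOpenKey(String volumeName, String bucketName,
    String keyName, String multipartUploadID) throws IOException {
  // Delegate to the shared utility so each bucket layout produces its own
  // multipart open key format. Parameter list is assumed here; check the
  // real OmMultipartUploadUtils#getMultipartOpenKey signature.
  return OmMultipartUploadUtils.getMultipartOpenKey(volumeName, bucketName,
      keyName, multipartUploadID, omMetadataManager, getBucketLayout());
}
```

On the second comment, if `S3MultipartUploadCommitPartResponse` does persist the updated part list to `multipartInfoTable`, the check at that point in testAddDBToBatchWithParts would mirror the assertions already used in testAddDBToBatch of the same file:

```java
// Committing a part removes the open key entry but keeps (updates) the
// multipart info entry, so the second assertion would be assertNotNull.
assertNull(omMetadataManager.getOpenKeyTable(getBucketLayout()).get(openKey));
assertNotNull(omMetadataManager.getMultipartInfoTable().get(multipartKey));
```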
To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected]
