linyiqun commented on a change in pull request #1923:
URL: https://github.com/apache/ozone/pull/1923#discussion_r576591831
##########
File path:
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
##########
@@ -264,12 +274,232 @@ public void testUploadPartOverrideWithRatis() throws
IOException {
commitUploadPartInfo = ozoneOutputStream
.getCommitUploadPartInfo();
- assertNotNull(commitUploadPartInfo);
- assertNotNull(commitUploadPartInfo.getPartName());
+ Assert.assertNotNull(commitUploadPartInfo);
+ Assert.assertNotNull(commitUploadPartInfo.getPartName());
// PartName should be different from old part Name.
- assertNotEquals("Part names should be different", partName,
+ Assert.assertNotEquals("Part names should be different", partName,
commitUploadPartInfo.getPartName());
}
+ @Test
+ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+
+ // Initiate multipart upload
+ String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ ONE);
+
+ // Upload Parts
+ Map<Integer, String> partsMap = new TreeMap<>();
+ // Uploading part 1 with less than min size
+ String partName = uploadPart(bucket, keyName, uploadID, 1,
+ "data".getBytes(UTF_8));
+ partsMap.put(1, partName);
+
+ partName = uploadPart(bucket, keyName, uploadID, 2,
+ "data".getBytes(UTF_8));
+ partsMap.put(2, partName);
+
+ // Complete multipart upload
+ OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL,
+ () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+ }
+
+ @Test
+ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent()
+ throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+
+ String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ ONE);
+
+ // We have not uploaded any parts, but passing some list it should throw
+ // error.
+ TreeMap<Integer, String> partsMap = new TreeMap<>();
+ partsMap.put(1, UUID.randomUUID().toString());
+
+ OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+ () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+ }
+
+ @Test
+ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName()
+ throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+
+ String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ ONE);
+
+ uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
+
+ // We have not uploaded any parts, but passing some list it should throw
+ // error.
+ TreeMap<Integer, String> partsMap = new TreeMap<>();
+ partsMap.put(1, UUID.randomUUID().toString());
Review comment:
The code comment should be updated — it says no parts were uploaded, but this
case actually uploads a part and then passes an incorrect part name.
##########
File path:
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
##########
@@ -264,12 +274,232 @@ public void testUploadPartOverrideWithRatis() throws
IOException {
commitUploadPartInfo = ozoneOutputStream
.getCommitUploadPartInfo();
- assertNotNull(commitUploadPartInfo);
- assertNotNull(commitUploadPartInfo.getPartName());
+ Assert.assertNotNull(commitUploadPartInfo);
+ Assert.assertNotNull(commitUploadPartInfo.getPartName());
// PartName should be different from old part Name.
- assertNotEquals("Part names should be different", partName,
+ Assert.assertNotEquals("Part names should be different", partName,
commitUploadPartInfo.getPartName());
}
+ @Test
+ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+
+ // Initiate multipart upload
+ String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ ONE);
+
+ // Upload Parts
+ Map<Integer, String> partsMap = new TreeMap<>();
+ // Uploading part 1 with less than min size
+ String partName = uploadPart(bucket, keyName, uploadID, 1,
+ "data".getBytes(UTF_8));
+ partsMap.put(1, partName);
+
+ partName = uploadPart(bucket, keyName, uploadID, 2,
+ "data".getBytes(UTF_8));
+ partsMap.put(2, partName);
+
+ // Complete multipart upload
+ OzoneTestUtils.expectOmException(OMException.ResultCodes.ENTITY_TOO_SMALL,
+ () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+ }
+
+ @Test
+ public void testMultipartUploadWithPartsMisMatchWithListSizeDifferent()
+ throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+
+ String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ ONE);
+
+ // We have not uploaded any parts, but passing some list it should throw
+ // error.
+ TreeMap<Integer, String> partsMap = new TreeMap<>();
+ partsMap.put(1, UUID.randomUUID().toString());
+
+ OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+ () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+ }
+
+ @Test
+ public void testMultipartUploadWithPartsMisMatchWithIncorrectPartName()
+ throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+
+ String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ ONE);
+
+ uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
+
+ // We have not uploaded any parts, but passing some list it should throw
+ // error.
+ TreeMap<Integer, String> partsMap = new TreeMap<>();
+ partsMap.put(1, UUID.randomUUID().toString());
+
+ OzoneTestUtils.expectOmException(OMException.ResultCodes.INVALID_PART,
+ () -> completeMultipartUpload(bucket, keyName, uploadID, partsMap));
+ }
+
+ @Test
+ public void testMultipartUploadWithMissingParts() throws Exception {
+ String volumeName = UUID.randomUUID().toString();
+ String bucketName = UUID.randomUUID().toString();
+ String keyName = UUID.randomUUID().toString();
+
+ store.createVolume(volumeName);
+ OzoneVolume volume = store.getVolume(volumeName);
+ volume.createBucket(bucketName);
+ OzoneBucket bucket = volume.getBucket(bucketName);
+
+ String uploadID = initiateMultipartUpload(bucket, keyName, STAND_ALONE,
+ ONE);
+
+ uploadPart(bucket, keyName, uploadID, 1, "data".getBytes(UTF_8));
+
+ // We have not uploaded any parts, but passing some list it should throw
+ // error.
+ TreeMap<Integer, String> partsMap = new TreeMap<>();
+ partsMap.put(3, "random");
Review comment:
The code comment should be updated — it says no parts were uploaded, but this
case actually uploads a part and then passes an incorrect part number.
##########
File path:
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/request/s3/multipart/S3MultipartUploadCompleteRequest.java
##########
@@ -351,8 +267,151 @@ public OMClientResponse
validateAndUpdateCache(OzoneManager ozoneManager,
LOG.error("Unrecognized Result for S3MultipartUploadCommitRequest: {}",
multipartUploadCompleteRequest);
}
+ }
- return omClientResponse;
+ @SuppressWarnings("checkstyle:ParameterNumber")
+ protected OmKeyInfo getOmKeyInfo(OzoneManager ozoneManager, long
trxnLogIndex,
+ KeyArgs keyArgs, String volumeName, String bucketName, String keyName,
+ String multipartKey, OMMetadataManager omMetadataManager,
+ String ozoneKey, TreeMap<Integer, PartKeyInfo> partKeyInfoMap,
+ List<OmKeyLocationInfo> partLocationInfos, long dataSize)
+ throws IOException {
+ HddsProtos.ReplicationType type = partKeyInfoMap.lastEntry().getValue()
+ .getPartKeyInfo().getType();
+ HddsProtos.ReplicationFactor factor =
+ partKeyInfoMap.lastEntry().getValue().getPartKeyInfo().getFactor();
+
+ OmKeyInfo omKeyInfo = getOmKeyInfoFromKeyTable(ozoneKey, keyName,
+ omMetadataManager);
+ if (omKeyInfo == null) {
+ // This is a newly added key, it does not have any versions.
+ OmKeyLocationInfoGroup keyLocationInfoGroup = new
+ OmKeyLocationInfoGroup(0, partLocationInfos);
+
+ // Get the objectID of the key from OpenKeyTable
+ OmKeyInfo dbOpenKeyInfo = getOmKeyInfoFromOpenKeyTable(multipartKey,
+ keyName, omMetadataManager);
+
+ // A newly created key, this is the first version.
+ OmKeyInfo.Builder builder =
+ new OmKeyInfo.Builder().setVolumeName(volumeName)
+ .setBucketName(bucketName).setKeyName(dbOpenKeyInfo.getKeyName())
+ .setReplicationFactor(factor).setReplicationType(type)
+ .setCreationTime(keyArgs.getModificationTime())
+ .setModificationTime(keyArgs.getModificationTime())
+ .setDataSize(dataSize)
+ .setOmKeyLocationInfos(
+ Collections.singletonList(keyLocationInfoGroup))
+ .setAcls(OzoneAclUtil.fromProtobuf(keyArgs.getAclsList()));
+ // Check if db entry has ObjectID. This check is required because
+ // it is possible that between multipart key uploads and complete,
+ // we had an upgrade.
+ if (dbOpenKeyInfo.getObjectID() != 0) {
+ builder.setObjectID(dbOpenKeyInfo.getObjectID());
+ }
+ builder.setParentObjectID(dbOpenKeyInfo.getParentObjectID());
+ builder.setFileName(dbOpenKeyInfo.getFileName());
Review comment:
Can we add an isBucketFSOptimized() check when setting the
parentObjectID/FileName? These two fields are only used when BucketFSO is
enabled.
##########
File path:
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/client/rpc/TestOzoneClientMultipartUploadV1.java
##########
@@ -264,12 +274,232 @@ public void testUploadPartOverrideWithRatis() throws
IOException {
commitUploadPartInfo = ozoneOutputStream
.getCommitUploadPartInfo();
- assertNotNull(commitUploadPartInfo);
- assertNotNull(commitUploadPartInfo.getPartName());
+ Assert.assertNotNull(commitUploadPartInfo);
+ Assert.assertNotNull(commitUploadPartInfo.getPartName());
// PartName should be different from old part Name.
- assertNotEquals("Part names should be different", partName,
+ Assert.assertNotEquals("Part names should be different", partName,
commitUploadPartInfo.getPartName());
}
+ @Test
+ public void testMultipartUploadWithPartsLessThanMinSize() throws Exception {
Review comment:
Can we additionally add another case for
OMException.ResultCodes.INVALID_PART_ORDER? I didn't see this case covered in
this unit test.
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]