This is an automated email from the ASF dual-hosted git repository.
elek pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/hadoop.git
The following commit(s) were added to refs/heads/trunk by this push:
new 4e0aa2c HDDS-948. MultipartUpload: S3 API for Abort Multipart Upload.
Contributed by Bharat Viswanadham.
4e0aa2c is described below
commit 4e0aa2ceac893b2f7f9b8d480cb83c840bf22b95
Author: Márton Elek <[email protected]>
AuthorDate: Thu Jan 24 20:24:19 2019 +0100
HDDS-948. MultipartUpload: S3 API for Abort Multipart Upload. Contributed
by Bharat Viswanadham.
---
.../src/main/smoketest/s3/MultipartUpload.robot | 11 ++++
.../org/apache/hadoop/ozone/om/KeyManagerImpl.java | 2 +-
.../hadoop/ozone/s3/endpoint/ObjectEndpoint.java | 37 ++++++++++++-
.../hadoop/ozone/client/OzoneBucketStub.java | 10 ++++
.../s3/endpoint/TestAbortMultipartUpload.java | 64 ++++++++++++++++++++++
.../hadoop/ozone/s3/endpoint/TestObjectDelete.java | 2 +-
6 files changed, 122 insertions(+), 4 deletions(-)
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index 61960d8..d6da2db 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -145,6 +145,17 @@ Test Multipart Upload Complete Invalid part
     ${result} =     Execute AWSS3APICli and checkrc    complete-multipart-upload --upload-id ${uploadID} --bucket ${BUCKET} --key multipartKey3 --multipart-upload 'Parts=[{ETag=etag1,PartNumber=1},{ETag=etag2,PartNumber=2}]'    255
     Should contain    ${result}    InvalidPart
+Test abort Multipart upload
+    ${result} =     Execute AWSS3APICli     create-multipart-upload --bucket ${BUCKET} --key multipartKey4 --storage-class REDUCED_REDUNDANCY
+    ${uploadID} =   Execute and checkrc     echo '${result}' | jq -r '.UploadId'    0
+    Should contain    ${result}    ${BUCKET}
+    Should contain    ${result}    multipartKey
+    Should contain    ${result}    UploadId
+
+    ${result} =     Execute AWSS3APICli and checkrc    abort-multipart-upload --bucket ${BUCKET} --key multipartKey4 --upload-id ${uploadID}    0
+
+Test abort Multipart upload with invalid uploadId
+    ${result} =     Execute AWSS3APICli and checkrc    abort-multipart-upload --bucket ${BUCKET} --key multipartKey5 --upload-id "random"    255
Upload part with Incorrect uploadID
     Execute    echo "Multipart upload" > /tmp/testfile
diff --git a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
index 752d18d..c3e7b0a 100644
--- a/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
+++ b/hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java
@@ -1016,7 +1016,7 @@ public class KeyManagerImpl implements KeyManager {
LOG.error("Abort Multipart Upload Failed: volume: " + volumeName +
"bucket: " + bucketName + "key: " + keyName, ex);
throw new OMException(ex.getMessage(), ResultCodes
- .COMPLETE_MULTIPART_UPLOAD_FAILED);
+ .ABORT_MULTIPART_UPLOAD_FAILED);
} finally {
metadataManager.getLock().releaseBucketLock(volumeName, bucketName);
}
diff --git a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index 0b75e53..1ecb757 100644
--- a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -332,17 +332,50 @@ public class ObjectEndpoint extends EndpointBase {
}
/**
- * Delete a specific object from a bucket.
+ * Abort multipart upload request.
+ * @param bucket
+ * @param key
+ * @param uploadId
+ * @return Response
+ * @throws IOException
+ * @throws OS3Exception
+ */
+ private Response abortMultipartUpload(String bucket, String key, String
+ uploadId) throws IOException, OS3Exception {
+ try {
+ OzoneBucket ozoneBucket = getBucket(bucket);
+ ozoneBucket.abortMultipartUpload(key, uploadId);
+ } catch (IOException ex) {
+ if (ex.getMessage().contains("NO_SUCH_MULTIPART_UPLOAD")) {
+ throw S3ErrorTable.newError(S3ErrorTable.NO_SUCH_UPLOAD, uploadId);
+ }
+ throw ex;
+ }
+ return Response
+ .status(Status.NO_CONTENT)
+ .build();
+ }
+
+
+ /**
+ * Delete a specific object from a bucket, if query param uploadId is
+ * specified, this request is for abort multipart upload.
* <p>
* See: https://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectDELETE.html
+ * https://docs.aws.amazon.com/AmazonS3/latest/API/mpUploadAbort.html
* for more details.
*/
@DELETE
public Response delete(
@PathParam("bucket") String bucketName,
- @PathParam("path") String keyPath) throws IOException, OS3Exception {
+ @PathParam("path") String keyPath,
+ @QueryParam("uploadId") @DefaultValue("") String uploadId) throws
+ IOException, OS3Exception {
try {
+ if (uploadId != null && !uploadId.equals("")) {
+ return abortMultipartUpload(bucketName, keyPath, uploadId);
+ }
OzoneBucket bucket = getBucket(bucketName);
bucket.getKey(keyPath);
bucket.deleteKey(keyPath);
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
index 4584607..03fbb36 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/OzoneBucketStub.java
@@ -223,6 +223,16 @@ public class OzoneBucketStub extends OzoneBucket {
DigestUtils.sha256Hex(key));
}
+ @Override
+ public void abortMultipartUpload(String keyName, String uploadID) throws
+ IOException {
+ if (multipartUploadIdMap.get(keyName) == null) {
+ throw new IOException("NO_SUCH_MULTIPART_UPLOAD");
+ } else {
+ multipartUploadIdMap.remove(keyName);
+ }
+ }
+
/**
* Class used to hold part information in a upload part request.
*/
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
new file mode 100644
index 0000000..61cf48a
--- /dev/null
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestAbortMultipartUpload.java
@@ -0,0 +1,64 @@
+package org.apache.hadoop.ozone.s3.endpoint;
+
+import org.apache.hadoop.ozone.client.OzoneClientStub;
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+import org.apache.hadoop.ozone.s3.exception.S3ErrorTable;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.Response;
+
+
+import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.when;
+
+/**
+ * This class tests abort multipart upload request.
+ */
+public class TestAbortMultipartUpload {
+
+
+ @Test
+ public void testAbortMultipartUpload() throws Exception {
+
+ String bucket = "s3bucket";
+ String key = "key1";
+ OzoneClientStub client = new OzoneClientStub();
+ client.getObjectStore().createS3Bucket("ozone", bucket);
+
+ HttpHeaders headers = Mockito.mock(HttpHeaders.class);
+ when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
+ "STANDARD");
+
+ ObjectEndpoint rest = new ObjectEndpoint();
+ rest.setHeaders(headers);
+ rest.setClient(client);
+
+ Response response = rest.multipartUpload(bucket, key, "", "", null);
+
+ assertEquals(response.getStatus(), 200);
+ MultipartUploadInitiateResponse multipartUploadInitiateResponse =
+ (MultipartUploadInitiateResponse) response.getEntity();
+ assertNotNull(multipartUploadInitiateResponse.getUploadID());
+ String uploadID = multipartUploadInitiateResponse.getUploadID();
+
+
+ // Abort multipart upload
+ response = rest.delete(bucket, key, uploadID);
+
+ assertEquals(204, response.getStatus());
+
+ // test with unknown upload Id.
+ try {
+ rest.delete(bucket, key, "random");
+ } catch (OS3Exception ex) {
+ assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getCode(), ex.getCode());
+ assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(),
+ ex.getErrorMessage());
+ }
+
+ }
+}
diff --git a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java
index 395aceb..b5d0c93 100644
--- a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java
+++ b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectDelete.java
@@ -51,7 +51,7 @@ public class TestObjectDelete {
rest.setClient(client);
//WHEN
- rest.delete("b1", "key1");
+ rest.delete("b1", "key1", null);
//THEN
Assert.assertFalse("Bucket Should not contain any key after delete",
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]