This is an automated email from the ASF dual-hosted git repository.

bharat pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hadoop-ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 5d14b1c  HDDS-3866. Support multi-part-upload with Freon S3 key generator (#1125)
5d14b1c is described below

commit 5d14b1c921fb7610521dd894703a188751928191
Author: Elek, Márton <[email protected]>
AuthorDate: Fri Jun 26 06:33:47 2020 +0200

    HDDS-3866. Support multi-part-upload with Freon S3 key generator (#1125)
---
 .../apache/hadoop/ozone/freon/S3KeyGenerator.java  | 72 ++++++++++++++++++++--
 1 file changed, 68 insertions(+), 4 deletions(-)
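
For readers who just want the shape of the change: when --multi-part-upload is enabled, createKey() no longer issues a single putObject() call; it initiates a multipart upload, uploads --parts parts of --size bytes each, and completes the upload with the collected part ETags. The standalone Java sketch below walks through the same AWS SDK (v1) call sequence outside of Freon; the class name, endpoint, bucket and key values are illustrative placeholders (not taken from the commit), and credentials are assumed to come from the SDK's default provider chain.

    import java.io.ByteArrayInputStream;
    import java.util.ArrayList;
    import java.util.List;

    import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
    import com.amazonaws.services.s3.AmazonS3;
    import com.amazonaws.services.s3.AmazonS3ClientBuilder;
    import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
    import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
    import com.amazonaws.services.s3.model.PartETag;
    import com.amazonaws.services.s3.model.UploadPartRequest;

    public final class MultipartUploadSketch {

      public static void main(String[] args) {
        // Placeholder values -- adjust for your environment.
        final String endpoint = "http://localhost:9878";
        final String bucket = "bucket1";
        final String key = "key1";
        final int partSize = 5 * 1024 * 1024; // every part except the last must be >= 5 MB
        final int numberOfParts = 3;

        // Path-style access against an S3-compatible endpoint, signed as us-east-1
        // (the same endpoint configuration the patched generator uses).
        final AmazonS3 s3 = AmazonS3ClientBuilder.standard()
            .withPathStyleAccessEnabled(true)
            .withEndpointConfiguration(
                new EndpointConfiguration(endpoint, "us-east-1"))
            .build();

        // 1. Initiate the multipart upload and remember the upload id.
        final String uploadId = s3
            .initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key))
            .getUploadId();

        // 2. Upload each part and collect the returned ETags.
        final byte[] partContent = new byte[partSize]; // zero-filled payload is enough for a sketch
        final List<PartETag> parts = new ArrayList<>();
        for (int i = 1; i <= numberOfParts; i++) {
          final UploadPartRequest uploadPartRequest = new UploadPartRequest()
              .withBucketName(bucket)
              .withKey(key)
              .withUploadId(uploadId)
              .withPartNumber(i)
              .withPartSize(partSize)
              .withLastPart(i == numberOfParts)
              .withInputStream(new ByteArrayInputStream(partContent));
          parts.add(s3.uploadPart(uploadPartRequest).getPartETag());
        }

        // 3. Complete the upload; the object only becomes visible after this call.
        s3.completeMultipartUpload(
            new CompleteMultipartUploadRequest(bucket, key, uploadId, parts));
      }
    }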

diff --git a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java
index eb9a0ce..035f195 100644
--- a/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java
+++ b/hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java
@@ -16,6 +16,10 @@
  */
 package org.apache.hadoop.ozone.freon;
 
+import java.io.ByteArrayInputStream;
+import java.nio.charset.StandardCharsets;
+import java.util.ArrayList;
+import java.util.List;
 import java.util.concurrent.Callable;
 
 import org.apache.hadoop.hdds.cli.HddsVersionProvider;
@@ -25,8 +29,16 @@ import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
 import com.amazonaws.regions.Regions;
 import com.amazonaws.services.s3.AmazonS3;
 import com.amazonaws.services.s3.AmazonS3ClientBuilder;
+import static com.amazonaws.services.s3.internal.SkipMd5CheckStrategy.DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY;
+import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
+import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
+import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
+import com.amazonaws.services.s3.model.PartETag;
+import com.amazonaws.services.s3.model.UploadPartRequest;
+import com.amazonaws.services.s3.model.UploadPartResult;
 import com.codahale.metrics.Timer;
 import org.apache.commons.lang3.RandomStringUtils;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_MULTIPART_MIN_SIZE;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import picocli.CommandLine.Command;
@@ -54,7 +66,8 @@ public class S3KeyGenerator extends BaseFreonGenerator
   private String bucketName;
 
   @Option(names = {"-s", "--size"},
-      description = "Size of the generated key (in bytes)",
+      description = "Size of the generated key (in bytes) or size of one "
+          + "multipart upload part (in case of multipart upload)",
       defaultValue = "10240")
   private int fileSize;
 
@@ -63,6 +76,17 @@ public class S3KeyGenerator extends BaseFreonGenerator
       defaultValue = "http://localhost:9878";)
   private String endpoint;
 
+  @Option(names = {"--multi-part-upload"},
+      description = "User multi part upload",
+      defaultValue = "false")
+  private boolean multiPart;
+
+  @Option(names = {"--parts"},
+      description = "Number of parts for multipart upload (final size = "
+          + "--size * --parts)",
+      defaultValue = "10")
+  private int numberOfParts;
+
   private Timer timer;
 
   private String content;
@@ -72,6 +96,10 @@ public class S3KeyGenerator extends BaseFreonGenerator
   @Override
   public Void call() throws Exception {
 
+    if (multiPart && fileSize < OM_MULTIPART_MIN_SIZE) {
+      throw new IllegalArgumentException(
+          "Size of multipart upload parts should be at least 5MB (5242880)");
+    }
     init();
 
     AmazonS3ClientBuilder amazonS3ClientBuilder =
@@ -81,7 +109,8 @@ public class S3KeyGenerator extends BaseFreonGenerator
     if (endpoint.length() > 0) {
       amazonS3ClientBuilder
           .withPathStyleAccessEnabled(true)
-          .withEndpointConfiguration(new EndpointConfiguration(endpoint, ""));
+          .withEndpointConfiguration(
+              new EndpointConfiguration(endpoint, "us-east-1"));
 
     } else {
       amazonS3ClientBuilder.withRegion(Regions.DEFAULT_REGION);
@@ -93,6 +122,7 @@ public class S3KeyGenerator extends BaseFreonGenerator
 
     timer = getMetrics().timer("key-create");
 
+    System.setProperty(DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY, "true");
     runTests(this::createKey);
 
     return null;
@@ -100,9 +130,43 @@ public class S3KeyGenerator extends BaseFreonGenerator
 
   private void createKey(long counter) throws Exception {
     timer.time(() -> {
+      if (multiPart) {
+
+        final String keyName = generateObjectName(counter);
+        final InitiateMultipartUploadRequest initiateRequest =
+            new InitiateMultipartUploadRequest(bucketName, keyName);
+
+        final InitiateMultipartUploadResult initiateMultipartUploadResult =
+            s3.initiateMultipartUpload(initiateRequest);
+        final String uploadId = initiateMultipartUploadResult.getUploadId();
+
+        List<PartETag> parts = new ArrayList<>();
+        for (int i = 1; i <= numberOfParts; i++) {
+
+          final UploadPartRequest uploadPartRequest = new UploadPartRequest()
+              .withBucketName(bucketName)
+              .withKey(keyName)
+              .withPartNumber(i)
+              .withLastPart(i == numberOfParts)
+              .withUploadId(uploadId)
+              .withPartSize(fileSize)
+              .withInputStream(new ByteArrayInputStream(content.getBytes(
+                  StandardCharsets.UTF_8)));
+
+          final UploadPartResult uploadPartResult =
+              s3.uploadPart(uploadPartRequest);
+          parts.add(uploadPartResult.getPartETag());
+        }
+
+        s3.completeMultipartUpload(
+            new CompleteMultipartUploadRequest(bucketName, keyName, uploadId,
+                parts));
+
+      } else {
+        s3.putObject(bucketName, generateObjectName(counter),
+            content);
+      }
 
-      s3.putObject(bucketName, generateObjectName(counter),
-          content);
       return null;
     });
   }
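
A note on the remaining changes: with multipart enabled the final object size is --size * --parts, and --size now denotes the size of a single part, which is why call() rejects values below OM_MULTIPART_MIN_SIZE (5 MB, i.e. 5242880 bytes). The patch also sets DISABLE_PUT_OBJECT_MD5_VALIDATION_PROPERTY so the SDK skips its client-side MD5 check, and it pins the signing region for custom endpoints to "us-east-1" instead of an empty string. A typical run (assuming the generator's usual s3kg Freon subcommand and a pre-created target bucket, neither of which is shown in this diff) would pass something like --multi-part-upload --parts=5 --size=5242880 to write roughly 25 MB keys.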


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
