This is an automated email from the ASF dual-hosted git repository.

adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git


The following commit(s) were added to refs/heads/master by this push:
     new 54a5985405 HDDS-9654. Support ranged GET request for a specified part 
(#5565)
54a5985405 is described below

commit 54a5985405d570b5aa44a0fe6bb5a20e12df333a
Author: tanvipenumudy <[email protected]>
AuthorDate: Sat Nov 18 01:39:34 2023 +0530

    HDDS-9654. Support ranged GET request for a specified part (#5565)
---
 .../ozone/client/protocol/ClientProtocol.java      |  12 ++
 .../apache/hadoop/ozone/client/rpc/RpcClient.java  |  17 +-
 .../src/main/smoketest/s3/MultipartUpload.robot    |  12 ++
 .../hadoop/ozone/TestMultipartObjectGet.java       | 222 +++++++++++++++++++++
 .../hadoop/ozone/s3/endpoint/ObjectEndpoint.java   |   6 +-
 .../hadoop/ozone/client/ClientProtocolStub.java    |   7 +
 .../hadoop/ozone/s3/endpoint/TestListParts.java    |   8 +-
 .../hadoop/ozone/s3/endpoint/TestObjectGet.java    |  16 +-
 .../ozone/s3/endpoint/TestPermissionCheck.java     |   2 +-
 .../ozone/s3/metrics/TestS3GatewayMetrics.java     |   8 +-
 10 files changed, 290 insertions(+), 20 deletions(-)

diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
index 2503f174e6..5316f7a99e 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/protocol/ClientProtocol.java
@@ -162,6 +162,18 @@ public interface ClientProtocol {
   OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName)
       throws IOException;
 
+  /**
+   * Get OzoneKeyDetails for a specific multipart-upload part in S3 context.
+   * @param bucketName Name of the Bucket
+   * @param keyName Key name
+   * @param partNumber Multipart-upload part number
+   * @return {@link OzoneKeyDetails} restricted to the given part
+   * @throws IOException if the key details cannot be retrieved
+   */
+  OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName,
+                                  int partNumber)
+      throws IOException;
+
   OzoneVolume buildOzoneVolume(OmVolumeArgs volume);
 
   /**
diff --git 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
index 7bc92e63b9..c85e2ed389 100644
--- 
a/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
+++ 
b/hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/rpc/RpcClient.java
@@ -1646,7 +1646,6 @@ public class RpcClient implements ClientProtocol {
       throws IOException {
     OmKeyInfo keyInfo =
         getKeyInfo(volumeName, bucketName, keyName, false);
-
     return getOzoneKeyDetails(keyInfo);
   }
 
@@ -1678,6 +1677,22 @@ public class RpcClient implements ClientProtocol {
     return getOzoneKeyDetails(keyInfo);
   }
 
+  @Override
+  public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName,
+                                         int partNumber) throws IOException {
+    OmKeyInfo keyInfo = getS3KeyInfo(bucketName, keyName, false);
+    List<OmKeyLocationInfo> filteredKeyLocationInfo = keyInfo
+        .getLatestVersionLocations().getBlocksLatestVersionOnly().stream()
+        .filter(omKeyLocationInfo -> omKeyLocationInfo.getPartNumber() ==
+            partNumber)
+        .collect(Collectors.toList());
+    keyInfo.updateLocationInfoList(filteredKeyLocationInfo, false);
+    keyInfo.setDataSize(filteredKeyLocationInfo.stream()
+        .mapToLong(OmKeyLocationInfo::getLength)
+        .sum());
+    return getOzoneKeyDetails(keyInfo);
+  }
+
   @NotNull
   private OmKeyInfo getS3KeyInfo(
       String bucketName, String keyName, boolean isHeadOp) throws IOException {
diff --git a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot 
b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
index 1ed2f2bc1b..04cce8fefc 100644
--- a/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
+++ b/hadoop-ozone/dist/src/main/smoketest/s3/MultipartUpload.robot
@@ -115,6 +115,12 @@ Test Multipart Upload Complete
                                 Execute                    cat /tmp/part1 
/tmp/part2 > /tmp/${PREFIX}-multipartKey1
     Compare files               /tmp/${PREFIX}-multipartKey1         
/tmp/${PREFIX}-multipartKey1.result
 
+    ${result} =                 Execute AWSS3ApiCli        get-object --bucket 
${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 1 
/tmp/${PREFIX}-multipartKey1-part1.result
+    Compare files               /tmp/part1        
/tmp/${PREFIX}-multipartKey1-part1.result
+
+    ${result} =                 Execute AWSS3ApiCli        get-object --bucket 
${BUCKET} --key ${PREFIX}/multipartKey1 --part-number 2 
/tmp/${PREFIX}-multipartKey1-part2.result
+    Compare files               /tmp/part2        
/tmp/${PREFIX}-multipartKey1-part2.result
+
 Test Multipart Upload Complete Entity too small
     ${result} =         Execute AWSS3APICli     create-multipart-upload 
--bucket ${BUCKET} --key ${PREFIX}/multipartKey2
     ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r 
'.UploadId'    0
@@ -183,6 +189,12 @@ Test Multipart Upload Complete Invalid part errors and 
complete mpu with few par
                         Execute                    cat /tmp/part1 /tmp/part3 > 
/tmp/${PREFIX}-multipartKey3
     Compare files       /tmp/${PREFIX}-multipartKey3         
/tmp/${PREFIX}-multipartKey3.result
 
+    ${result} =         Execute AWSS3ApiCli        get-object --bucket 
${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 1 
/tmp/${PREFIX}-multipartKey3-part1.result
+    Compare files       /tmp/part1         
/tmp/${PREFIX}-multipartKey3-part1.result
+
+    ${result} =         Execute AWSS3ApiCli        get-object --bucket 
${BUCKET} --key ${PREFIX}/multipartKey3 --part-number 3 
/tmp/${PREFIX}-multipartKey3-part3.result
+    Compare files       /tmp/part3         
/tmp/${PREFIX}-multipartKey3-part3.result
+
 Test abort Multipart upload
     ${result} =         Execute AWSS3APICli     create-multipart-upload 
--bucket ${BUCKET} --key ${PREFIX}/multipartKey4 --storage-class 
REDUCED_REDUNDANCY
     ${uploadID} =       Execute and checkrc     echo '${result}' | jq -r 
'.UploadId'    0
diff --git 
a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
new file mode 100644
index 0000000000..37cc7a3411
--- /dev/null
+++ 
b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMultipartObjectGet.java
@@ -0,0 +1,222 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone;
+
+import org.apache.hadoop.hdds.conf.DefaultConfigManager;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.utils.IOUtils;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.om.OzoneManager;
+import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadRequest;
+import org.apache.hadoop.ozone.s3.endpoint.CompleteMultipartUploadResponse;
+import org.apache.hadoop.ozone.s3.endpoint.MultipartUploadInitiateResponse;
+import org.apache.hadoop.ozone.s3.endpoint.ObjectEndpoint;
+import org.apache.hadoop.ozone.s3.exception.OS3Exception;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.mockito.Mockito;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.ws.rs.container.ContainerRequestContext;
+import javax.ws.rs.core.HttpHeaders;
+import javax.ws.rs.core.MultivaluedHashMap;
+import javax.ws.rs.core.Response;
+import javax.ws.rs.core.UriInfo;
+import java.io.IOException;
+import java.io.ByteArrayInputStream;
+import java.security.SecureRandom;
+import java.util.ArrayList;
+import java.util.UUID;
+import java.util.List;
+import java.util.Base64;
+import java.util.concurrent.TimeoutException;
+
+import static java.nio.charset.StandardCharsets.UTF_8;
+import static org.apache.hadoop.ozone.s3.util.S3Consts.STORAGE_CLASS_HEADER;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertNotNull;
+import static org.mockito.Mockito.when;
+
+/**
+ * Integration test class for testing 'ranged' GET request for the part
+ * specified.
+ */
+public class TestMultipartObjectGet {
+
+  public static final Logger LOG = LoggerFactory.getLogger(
+      TestMultipartObjectGet.class);
+  private static OzoneConfiguration conf;
+  private static String clusterId;
+  private static String scmId;
+  private static String omServiceId;
+  private static String scmServiceId;
+  private static final String BUCKET = OzoneConsts.BUCKET;
+  private static final String KEY = OzoneConsts.KEY;
+  private static MiniOzoneHAClusterImpl cluster;
+  private static OzoneClient client;
+  private static HttpHeaders headers;
+  private static ContainerRequestContext context;
+
+  private static final ObjectEndpoint REST = new ObjectEndpoint();
+
+  @BeforeClass
+  public static void init() throws Exception {
+    conf = new OzoneConfiguration();
+    clusterId = UUID.randomUUID().toString();
+    scmId = UUID.randomUUID().toString();
+    omServiceId = "om-service-test";
+    scmServiceId = "scm-service-test";
+
+    startCluster();
+    client = cluster.newClient();
+    client.getObjectStore().createS3Bucket(BUCKET);
+
+    headers = Mockito.mock(HttpHeaders.class);
+    when(headers.getHeaderString(STORAGE_CLASS_HEADER)).thenReturn(
+        "STANDARD");
+
+    context = Mockito.mock(ContainerRequestContext.class);
+    Mockito.when(context.getUriInfo()).thenReturn(Mockito.mock(UriInfo.class));
+    Mockito.when(context.getUriInfo().getQueryParameters())
+        .thenReturn(new MultivaluedHashMap<>());
+
+    REST.setHeaders(headers);
+    REST.setClient(client);
+    REST.setOzoneConfiguration(conf);
+    REST.setContext(context);
+  }
+
+  private static void startCluster()
+      throws IOException, TimeoutException, InterruptedException {
+    OzoneManager.setTestSecureOmFlag(true);
+    MiniOzoneCluster.Builder builder = MiniOzoneCluster.newHABuilder(conf)
+        .setClusterId(clusterId)
+        .setSCMServiceId(scmServiceId)
+        .setOMServiceId(omServiceId)
+        .setScmId(scmId)
+        .setNumDatanodes(3)
+        .setNumOfStorageContainerManagers(3)
+        .setNumOfOzoneManagers(3);
+    cluster = (MiniOzoneHAClusterImpl) builder.build();
+    cluster.waitForClusterToBeReady();
+  }
+
+  @AfterClass
+  public static void stop() {
+    IOUtils.close(LOG, client);
+    if (cluster != null) {
+      cluster.stop();
+    }
+    DefaultConfigManager.clearDefaultConfigs();
+  }
+
+  private String initiateMultipartUpload() throws IOException, OS3Exception {
+    Response response = REST.initializeMultipartUpload(BUCKET, KEY);
+    MultipartUploadInitiateResponse multipartUploadInitiateResponse =
+        (MultipartUploadInitiateResponse) response.getEntity();
+    assertNotNull(multipartUploadInitiateResponse.getUploadID());
+    String uploadID = multipartUploadInitiateResponse.getUploadID();
+    assertEquals(200, response.getStatus());
+    return uploadID;
+  }
+
+  private CompleteMultipartUploadRequest.Part uploadPart(String uploadID,
+                                                         int partNumber,
+                                                         String content)
+      throws IOException, OS3Exception {
+    ByteArrayInputStream body =
+        new ByteArrayInputStream(content.getBytes(UTF_8));
+    Response response = REST.put(BUCKET, KEY, content.length(),
+        partNumber, uploadID, body);
+    assertEquals(200, response.getStatus());
+    assertNotNull(response.getHeaderString("ETag"));
+
+    CompleteMultipartUploadRequest.Part
+        part = new CompleteMultipartUploadRequest.Part();
+    part.seteTag(response.getHeaderString("ETag"));
+    part.setPartNumber(partNumber);
+    return part;
+  }
+
+  private void completeMultipartUpload(
+      CompleteMultipartUploadRequest completeMultipartUploadRequest,
+      String uploadID) throws IOException, OS3Exception {
+    Response response = REST.completeMultipartUpload(BUCKET, KEY, uploadID,
+        completeMultipartUploadRequest);
+    assertEquals(200, response.getStatus());
+
+    CompleteMultipartUploadResponse completeMultipartUploadResponse =
+        (CompleteMultipartUploadResponse) response.getEntity();
+    assertEquals(BUCKET, completeMultipartUploadResponse.getBucket());
+    assertEquals(KEY, completeMultipartUploadResponse.getKey());
+    assertEquals(BUCKET, completeMultipartUploadResponse.getLocation());
+    assertNotNull(completeMultipartUploadResponse.getETag());
+  }
+
+  private void getObjectMultipart(int partNumber, long bytes)
+      throws IOException, OS3Exception {
+    Response response =
+        REST.get(BUCKET, KEY, partNumber, null, 100, null);
+    assertEquals(200, response.getStatus());
+    assertEquals(bytes, response.getLength());
+  }
+
+  @Test
+  public void testMultipart() throws Exception {
+    String uploadID = initiateMultipartUpload();
+    List<CompleteMultipartUploadRequest.Part> partsList = new ArrayList<>();
+
+    String content1 = generateRandomContent(5);
+    int partNumber = 1;
+    CompleteMultipartUploadRequest.Part
+        part1 = uploadPart(uploadID, partNumber, content1);
+    partsList.add(part1);
+
+    String content2 = generateRandomContent(5);
+    partNumber = 2;
+    CompleteMultipartUploadRequest.Part
+        part2 = uploadPart(uploadID, partNumber, content2);
+    partsList.add(part2);
+
+    String content3 = generateRandomContent(1);
+    partNumber = 3;
+    CompleteMultipartUploadRequest.Part
+        part3 = uploadPart(uploadID, partNumber, content3);
+    partsList.add(part3);
+
+    CompleteMultipartUploadRequest completeMultipartUploadRequest = new
+        CompleteMultipartUploadRequest();
+    completeMultipartUploadRequest.setPartList(partsList);
+    completeMultipartUpload(completeMultipartUploadRequest, uploadID);
+
+    getObjectMultipart(0,
+        (content1 + content2 + content3).getBytes(UTF_8).length);
+    getObjectMultipart(1, content1.getBytes(UTF_8).length);
+    getObjectMultipart(2, content2.getBytes(UTF_8).length);
+    getObjectMultipart(3, content3.getBytes(UTF_8).length);
+  }
+
+  private static String generateRandomContent(int sizeInMB) {
+    int bytesToGenerate = sizeInMB * 1024 * 1024;
+    byte[] randomBytes = new byte[bytesToGenerate];
+    new SecureRandom().nextBytes(randomBytes);
+    return Base64.getEncoder().encodeToString(randomBytes);
+  }
+}
diff --git 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
index d85a628ea3..ad4bbebeeb 100644
--- 
a/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
+++ 
b/hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/ObjectEndpoint.java
@@ -376,6 +376,7 @@ public class ObjectEndpoint extends EndpointBase {
   public Response get(
       @PathParam("bucket") String bucketName,
       @PathParam("path") String keyPath,
+      @QueryParam("partNumber") int partNumber,
       @QueryParam("uploadId") String uploadId,
       @QueryParam("max-parts") @DefaultValue("1000") int maxParts,
       @QueryParam("part-number-marker") String partNumberMarker)
@@ -393,8 +394,9 @@ public class ObjectEndpoint extends EndpointBase {
             partMarker, maxParts);
       }
 
-      OzoneKeyDetails keyDetails = getClientProtocol()
-          .getS3KeyDetails(bucketName, keyPath);
+      OzoneKeyDetails keyDetails = (partNumber != 0) ?
+          getClientProtocol().getS3KeyDetails(bucketName, keyPath, partNumber) 
:
+          getClientProtocol().getS3KeyDetails(bucketName, keyPath);
 
       isFile(keyPath, keyDetails);
 
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
index 44698a3649..174af69e25 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/client/ClientProtocolStub.java
@@ -120,6 +120,13 @@ public class ClientProtocolStub implements ClientProtocol {
     return objectStoreStub.getS3Volume().getBucket(bucketName).getKey(keyName);
   }
 
+  @Override
+  public OzoneKeyDetails getS3KeyDetails(String bucketName, String keyName,
+                                         int partNumber)
+      throws IOException {
+    return objectStoreStub.getS3Volume().getBucket(bucketName).getKey(keyName);
+  }
+
   @Override
   public OzoneVolume buildOzoneVolume(OmVolumeArgs volume) {
     return null;
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
index d6b02a3f4a..9b37024d33 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestListParts.java
@@ -93,7 +93,7 @@ public class TestListParts {
 
   @Test
   public void testListParts() throws Exception {
-    Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+    Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0,
         uploadID, 3, "0");
 
     ListPartsResponse listPartsResponse =
@@ -106,7 +106,7 @@ public class TestListParts {
 
   @Test
   public void testListPartsContinuation() throws Exception {
-    Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+    Response response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0,
         uploadID, 2, "0");
     ListPartsResponse listPartsResponse =
         (ListPartsResponse) response.getEntity();
@@ -115,7 +115,7 @@ public class TestListParts {
     assertEquals(2, listPartsResponse.getPartList().size());
 
     // Continue
-    response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, uploadID, 2,
+    response = REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0, uploadID, 2,
         Integer.toString(listPartsResponse.getNextPartNumberMarker()));
     listPartsResponse = (ListPartsResponse) response.getEntity();
 
@@ -127,7 +127,7 @@ public class TestListParts {
   @Test
   public void testListPartsWithUnknownUploadID() throws Exception {
     try {
-      REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY,
+      REST.get(OzoneConsts.S3_BUCKET, OzoneConsts.KEY, 0,
           uploadID, 2, "0");
     } catch (OS3Exception ex) {
       Assert.assertEquals(S3ErrorTable.NO_SUCH_UPLOAD.getErrorMessage(),
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
index 150b44436b..0ab94522c6 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestObjectGet.java
@@ -101,7 +101,7 @@ public class TestObjectGet {
   @Test
   public void get() throws IOException, OS3Exception {
     //WHEN
-    Response response = rest.get("b1", "key1", null, 0, null);
+    Response response = rest.get("b1", "key1", 0, null, 0, null);
 
     //THEN
     OzoneInputStream ozoneInputStream =
@@ -123,7 +123,7 @@ public class TestObjectGet {
   public void inheritRequestHeader() throws IOException, OS3Exception {
     setDefaultHeader();
 
-    Response response = rest.get("b1", "key1", null, 0, null);
+    Response response = rest.get("b1", "key1", 0, null, 0, null);
 
     Assert.assertEquals(CONTENT_TYPE1,
         response.getHeaderString("Content-Type"));
@@ -156,7 +156,7 @@ public class TestObjectGet {
 
     Mockito.when(context.getUriInfo().getQueryParameters())
         .thenReturn(queryParameter);
-    Response response = rest.get("b1", "key1", null, 0, null);
+    Response response = rest.get("b1", "key1", 0, null, 0, null);
 
     Assert.assertEquals(CONTENT_TYPE2,
         response.getHeaderString("Content-Type"));
@@ -177,13 +177,13 @@ public class TestObjectGet {
     Response response;
     
Mockito.when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-0");
 
-    response = rest.get("b1", "key1", null, 0, null);
+    response = rest.get("b1", "key1", 0, null, 0, null);
     Assert.assertEquals("1", response.getHeaderString("Content-Length"));
     Assert.assertEquals(String.format("bytes 0-0/%s", CONTENT.length()),
         response.getHeaderString("Content-Range"));
 
     Mockito.when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-");
-    response = rest.get("b1", "key1", null, 0, null);
+    response = rest.get("b1", "key1", 0, null, 0, null);
     Assert.assertEquals(String.valueOf(CONTENT.length()),
         response.getHeaderString("Content-Length"));
     Assert.assertEquals(
@@ -194,7 +194,7 @@ public class TestObjectGet {
   @Test
   public void getStatusCode() throws IOException, OS3Exception {
     Response response;
-    response = rest.get("b1", "key1", null, 0, null);
+    response = rest.get("b1", "key1", 0, null, 0, null);
     Assert.assertEquals(response.getStatus(),
         Response.Status.OK.getStatusCode());
 
@@ -202,7 +202,7 @@ public class TestObjectGet {
     // The 206 (Partial Content) status code indicates that the server is
     //   successfully fulfilling a range request for the target resource
     
Mockito.when(headers.getHeaderString(RANGE_HEADER)).thenReturn("bytes=0-1");
-    response = rest.get("b1", "key1", null, 0, null);
+    response = rest.get("b1", "key1", 0, null, 0, null);
     Assert.assertEquals(response.getStatus(),
         Response.Status.PARTIAL_CONTENT.getStatusCode());
   }
@@ -237,7 +237,7 @@ public class TestObjectGet {
     // WHEN
     final OS3Exception ex =
         Assertions.assertThrows(OS3Exception.class,
-            () -> rest.get(bucketName, keyPath, null, 0, null));
+            () -> rest.get(bucketName, keyPath, 0, null, 0, null));
 
     // THEN
     Assertions.assertEquals(NO_SUCH_KEY.getCode(), ex.getCode());
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
index 264f5486d1..51b44998f0 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/endpoint/TestPermissionCheck.java
@@ -267,7 +267,7 @@ public class TestPermissionCheck {
     objectEndpoint.setOzoneConfiguration(conf);
 
     try {
-      objectEndpoint.get("bucketName", "keyPath", null, 1000, "marker");
+      objectEndpoint.get("bucketName", "keyPath", 0, null, 1000, "marker");
       Assert.fail("Should fail");
     } catch (Exception e) {
       e.printStackTrace();
diff --git 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
index 9dacc7a7db..3596f74c47 100644
--- 
a/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
+++ 
b/hadoop-ozone/s3gateway/src/test/java/org/apache/hadoop/ozone/s3/metrics/TestS3GatewayMetrics.java
@@ -392,7 +392,7 @@ public class TestS3GatewayMetrics {
     keyEndpoint.put(bucketName, keyName, CONTENT
         .length(), 1, null, body);
     // GET the key from the bucket
-    Response response = keyEndpoint.get(bucketName, keyName, null, 0, null);
+    Response response = keyEndpoint.get(bucketName, keyName, 0, null, 0, null);
     StreamingOutput stream = (StreamingOutput) response.getEntity();
     stream.write(new ByteArrayOutputStream());
     long curMetric = metrics.getGetKeySuccess();
@@ -404,7 +404,7 @@ public class TestS3GatewayMetrics {
     long oriMetric = metrics.getGetKeyFailure();
     // Fetching a non-existent key
     try {
-      keyEndpoint.get(bucketName, "unknownKey", null, 0,
+      keyEndpoint.get(bucketName, "unknownKey", 0, null, 0,
           null);
       fail();
     } catch (OS3Exception ex) {
@@ -536,7 +536,7 @@ public class TestS3GatewayMetrics {
     String uploadID = initiateMultipartUpload(bucketName, keyName);
 
     // Listing out the parts by providing the uploadID
-    keyEndpoint.get(bucketName, keyName,
+    keyEndpoint.get(bucketName, keyName, 0,
         uploadID, 3, null);
     long curMetric = metrics.getListPartsSuccess();
     assertEquals(1L, curMetric - oriMetric);
@@ -548,7 +548,7 @@ public class TestS3GatewayMetrics {
     long oriMetric = metrics.getListPartsFailure();
     try {
       // Listing out the parts by providing the uploadID after aborting
-      keyEndpoint.get(bucketName, keyName,
+      keyEndpoint.get(bucketName, keyName, 0,
           "wrong_id", 3, null);
       fail();
     } catch (OS3Exception ex) {


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to