echonesis commented on code in PR #9286:
URL: https://github.com/apache/ozone/pull/9286#discussion_r2726244904


##########
hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java:
##########
@@ -397,6 +396,139 @@ public Response listMultipartUploads(
     }
   }
 
+  private int sanitizeMaxUploads(int maxUploads) throws OS3Exception {
+    if (maxUploads < 1) {
+      throw newError(S3ErrorTable.INVALID_ARGUMENT, "max-uploads",
+          new Exception("max-uploads must be positive"));
+    }
+    return Math.min(maxUploads, 1000);
+  }
+
+  private void validateEncodingType(String encodingType) throws OS3Exception {
+    if (encodingType != null && !encodingType.equals(ENCODING_TYPE)) {
+      throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, encodingType);
+    }
+  }
+
+  private ListMultipartUploadsResult buildMultipartUploadsResult(
+      OzoneBucket bucket,
+      String prefix,
+      String delimiter,
+      String encodingType,
+      String keyMarker,
+      String uploadIdMarker,
+      int maxUploads,
+      OzoneMultipartUploadList ozoneMultipartUploadList) {
+
+    ListMultipartUploadsResult result = new ListMultipartUploadsResult();
+    result.setBucket(bucket.getName());
+    result.setKeyMarker(EncodingTypeObject.createNullable(keyMarker, encodingType));
+    result.setUploadIdMarker(uploadIdMarker);
+    result.setNextKeyMarker(EncodingTypeObject.createNullable(
+        ozoneMultipartUploadList.getNextKeyMarker(), encodingType));
+    result.setPrefix(EncodingTypeObject.createNullable(prefix, encodingType));
+    result.setDelimiter(EncodingTypeObject.createNullable(delimiter, encodingType));
+    result.setEncodingType(encodingType);
+    result.setNextUploadIdMarker(ozoneMultipartUploadList.getNextUploadIdMarker());
+    result.setMaxUploads(maxUploads);
+    result.setTruncated(ozoneMultipartUploadList.isTruncated());
+
+    final String normalizedPrefix = prefix == null ? "" : prefix;
+    String prevDir = null;
+    String lastProcessedKey = null;
+    String lastProcessedUploadId = null;
+    int responseItemCount = 0;
+
+    List<OzoneMultipartUpload> pendingUploads =
+        ozoneMultipartUploadList.getUploads();
+    int processedUploads = 0;
+    for (OzoneMultipartUpload upload : pendingUploads) {
+      String keyName = upload.getKeyName();
+
+      if (bucket.getBucketLayout().isFileSystemOptimized()
+          && StringUtils.isNotEmpty(normalizedPrefix)
+          && !keyName.startsWith(normalizedPrefix)) {
+        continue;
+      }
+      if (keyName.length() < normalizedPrefix.length()) {
+        continue;
+      }

Review Comment:
   This check is redundant when `getBucketLayout().isFileSystemOptimized()` is `true` (covered by L450's `startsWith` check).
   
   Should the prefix filtering apply to all bucket layouts?
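   
   For illustration, if the filtering were meant to apply to all layouts, the loop body could simply drop the layout guard. A minimal sketch only, reusing the names from the surrounding loop (`normalizedPrefix`, `keyName`):
   
   ```java
   // Hypothetical: apply the prefix filter regardless of bucket layout,
   // which would also make the separate length check below unnecessary.
   if (StringUtils.isNotEmpty(normalizedPrefix)
       && !keyName.startsWith(normalizedPrefix)) {
     continue;
   }
   ```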



##########
hadoop-ozone/s3gateway/src/main/java/org/apache/hadoop/ozone/s3/endpoint/BucketEndpoint.java:
##########
@@ -397,6 +396,139 @@ public Response listMultipartUploads(
     }
   }
 
+  private int sanitizeMaxUploads(int maxUploads) throws OS3Exception {
+    if (maxUploads < 1) {
+      throw newError(S3ErrorTable.INVALID_ARGUMENT, "max-uploads",
+          new Exception("max-uploads must be positive"));
+    }
+    return Math.min(maxUploads, 1000);
+  }
+
+  private void validateEncodingType(String encodingType) throws OS3Exception {
+    if (encodingType != null && !encodingType.equals(ENCODING_TYPE)) {
+      throw S3ErrorTable.newError(S3ErrorTable.INVALID_ARGUMENT, encodingType);
+    }
+  }
+
+  private ListMultipartUploadsResult buildMultipartUploadsResult(
+      OzoneBucket bucket,
+      String prefix,
+      String delimiter,
+      String encodingType,
+      String keyMarker,
+      String uploadIdMarker,
+      int maxUploads,
+      OzoneMultipartUploadList ozoneMultipartUploadList) {
+
+    ListMultipartUploadsResult result = new ListMultipartUploadsResult();
+    result.setBucket(bucket.getName());
+    result.setKeyMarker(EncodingTypeObject.createNullable(keyMarker, encodingType));
+    result.setUploadIdMarker(uploadIdMarker);
+    result.setNextKeyMarker(EncodingTypeObject.createNullable(
+        ozoneMultipartUploadList.getNextKeyMarker(), encodingType));
+    result.setPrefix(EncodingTypeObject.createNullable(prefix, encodingType));
+    result.setDelimiter(EncodingTypeObject.createNullable(delimiter, encodingType));
+    result.setEncodingType(encodingType);
+    result.setNextUploadIdMarker(ozoneMultipartUploadList.getNextUploadIdMarker());
+    result.setMaxUploads(maxUploads);
+    result.setTruncated(ozoneMultipartUploadList.isTruncated());
+
+    final String normalizedPrefix = prefix == null ? "" : prefix;
+    String prevDir = null;
+    String lastProcessedKey = null;
+    String lastProcessedUploadId = null;
+    int responseItemCount = 0;
+
+    List<OzoneMultipartUpload> pendingUploads =
+        ozoneMultipartUploadList.getUploads();
+    int processedUploads = 0;
+    for (OzoneMultipartUpload upload : pendingUploads) {
+      String keyName = upload.getKeyName();
+
+      if (bucket.getBucketLayout().isFileSystemOptimized()
+          && StringUtils.isNotEmpty(normalizedPrefix)
+          && !keyName.startsWith(normalizedPrefix)) {
+        continue;
+      }
+      if (keyName.length() < normalizedPrefix.length()) {
+        continue;
+      }
+
+      String relativeKeyName = keyName.substring(normalizedPrefix.length());
+      String currentDirName = null;
+      boolean isDirectoryPlaceholder = false;
+      if (StringUtils.isNotBlank(delimiter)) {
+        int depth = StringUtils.countMatches(relativeKeyName, delimiter);
+        if (depth > 0) {
+          int delimiterIndex = relativeKeyName.indexOf(delimiter);
+          currentDirName = relativeKeyName.substring(0, delimiterIndex);
+        } else if (relativeKeyName.endsWith(delimiter)) {
+          currentDirName = relativeKeyName.substring(
+              0, relativeKeyName.length() - delimiter.length());
+          isDirectoryPlaceholder = true;
+        }
+      }
+
+      if (responseItemCount >= maxUploads) {
+        if (StringUtils.isNotBlank(delimiter)
+            && currentDirName != null
+            && currentDirName.equals(prevDir)) {
+          lastProcessedKey = keyName;
+          lastProcessedUploadId = upload.getUploadId();
+          continue;

Review Comment:
   `processedUploads` won't be incremented in this branch.
   Is that intentional?
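   
   If the counter is meant to cover every upload consumed from the OM listing (e.g. for a later truncation/marker calculation), a possible fix is to bump it before the `continue`. Sketch only, under that assumption:
   
   ```java
   // Hypothetical: count the upload as processed even when it collapses into
   // an already-emitted common prefix and is skipped from the response.
   if (StringUtils.isNotBlank(delimiter)
       && currentDirName != null
       && currentDirName.equals(prevDir)) {
     lastProcessedKey = keyName;
     lastProcessedUploadId = upload.getUploadId();
     processedUploads++;
     continue;
   }
   ```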



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
