cryptoe commented on code in PR #12874: URL: https://github.com/apache/druid/pull/12874#discussion_r942479194
########## extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/output/S3OutputConfig.java: ########## @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.druid.java.util.common.HumanReadableBytes; +import org.apache.druid.java.util.common.HumanReadableBytesRange; +import org.apache.druid.java.util.common.RetryUtils; + +import javax.annotation.Nullable; +import java.io.File; + +public class S3OutputConfig +{ + @JsonProperty + private String bucket; + + @JsonProperty + private String prefix; + + @JsonProperty + private File tempDir; Review Comment: That would be really nice to have but I think we are not there yet. We would have to change a lot of places to get there. I will file a GitHub issue for this. ########## extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/output/S3OutputConfig.java: ########## @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.druid.java.util.common.HumanReadableBytes; +import org.apache.druid.java.util.common.HumanReadableBytesRange; +import org.apache.druid.java.util.common.RetryUtils; + +import javax.annotation.Nullable; +import java.io.File; + +public class S3OutputConfig +{ + @JsonProperty + private String bucket; + + @JsonProperty + private String prefix; + + @JsonProperty + private File tempDir; + + @Nullable + @JsonProperty + @HumanReadableBytesRange( + min = RetriableS3OutputStream.S3_MULTIPART_UPLOAD_MIN_PART_SIZE, + max = RetriableS3OutputStream.S3_MULTIPART_UPLOAD_MAX_PART_SIZE + ) // limits of s3 multipart upload + private HumanReadableBytes chunkSize; + + /** + * Max size for each query results. This limit relies on the s3 multipart upload limits. 
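+ * (s3 multipart uploads allow parts of 5MiB to 5GiB, at most 10,000 parts per upload, and a maximum object size of 5TiB.)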
+ * See https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html for more details. + * + * @see RetriableS3OutputStream + */ + @JsonProperty + @HumanReadableBytesRange(min = 5L * 1024 * 1024, max = 5L * 1024 * 1024 * 1024 * 1024) + private HumanReadableBytes maxResultsSize = new HumanReadableBytes("100MiB"); Review Comment: I updated the documentation to say object. They need not be query results but can be any kind of object. ########## extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/output/RetriableS3OutputStream.java: ########## @@ -0,0 +1,432 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.base.Stopwatch; +import com.google.common.io.CountingOutputStream; +import it.unimi.dsi.fastutil.io.FastBufferedOutputStream; +import org.apache.druid.java.util.common.FileUtils; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.IOE; +import org.apache.druid.java.util.common.RetryUtils; +import org.apache.druid.java.util.common.io.Closer; +import org.apache.druid.java.util.common.logger.Logger; +import org.apache.druid.storage.s3.S3Utils; +import org.apache.druid.storage.s3.ServerSideEncryptingAmazonS3; + +import java.io.Closeable; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +/** + * A retriable output stream for s3. It works as follows: + * <p> + * 1) When new data is written, it first creates a chunk on local disk. + * 2) New data is written to the local chunk until it is full. + * 3) When the chunk is full, it uploads the chunk to s3 using the multipart upload API. + * Since this happens synchronously, {@link #write(byte[], int, int)} can be blocked until the upload is done. + * The upload can be retried when it fails with transient errors. + * 4) Once the upload succeeds, it creates a new chunk and continues.
+ * 5) When the stream is closed, it uploads the last chunk and finalizes the multipart upload. + * {@link #close()} can be blocked until the upload is done. + * <p> + * This output stream supports compression formats if they are <i>concatenatable</i>, + * such as ZIP or GZIP. + * <p> + * This class is not thread-safe. + * <p> + * This class can be moved to the s3 extension as a low-level API, + * whereas it currently provides only high-level APIs such as S3DataSegmentPuller. + */ +public class RetriableS3OutputStream extends OutputStream +{ + public static final long S3_MULTIPART_UPLOAD_MIN_PART_SIZE = 5L * 1024 * 1024; + public static final long S3_MULTIPART_UPLOAD_MAX_PART_SIZE = 5L * 1024 * 1024 * 1024L; + + private static final Logger LOG = new Logger(RetriableS3OutputStream.class); + private static final Joiner JOINER = Joiner.on("/").skipNulls(); + private static final int S3_MULTIPART_UPLOAD_MAX_NUM_PARTS = 10_000; + + private final S3OutputConfig config; + private final ServerSideEncryptingAmazonS3 s3; + private final String s3Key; + private final String uploadId; + private final File chunkStorePath; + private final long chunkSize; + + private final List<PartETag> pushResults = new ArrayList<>(); + private final byte[] singularBuffer = new byte[1]; + + // metric + private final Stopwatch pushStopwatch; + + private Chunk currentChunk; + private int nextChunkId = 1; // multipart upload requires partNumber to be in the range between 1 and 10000 + private int numChunksPushed; + /** + * Total size of all chunks. This size is updated whenever the chunk is ready for push, + * not when {@link #write(byte[], int, int)} is called. This is because + * it is hard to know the increase of the chunk size in write() when the chunk is compressed. + */ + private long resultsSize; + + /** + * A flag indicating whether there was an upload error. + * This flag is tested in {@link #close()} to determine whether it needs to upload the current chunk or not. + */ + private boolean error; + private boolean closed; + + public RetriableS3OutputStream( + S3OutputConfig config, + ServerSideEncryptingAmazonS3 s3, + String s3Key + ) throws IOException + { + + this(config, s3, s3Key, true); + } + + @VisibleForTesting + protected RetriableS3OutputStream( + S3OutputConfig config, + ServerSideEncryptingAmazonS3 s3, + String s3Key, + boolean chunkValidation + ) throws IOException + { + this.config = config; + this.s3 = s3; + this.s3Key = s3Key; + + final InitiateMultipartUploadResult result = s3.initiateMultipartUpload( + new InitiateMultipartUploadRequest(config.getBucket(), s3Key) + ); + this.uploadId = result.getUploadId(); + this.chunkStorePath = new File(config.getTempDir(), uploadId + UUID.randomUUID()); + FileUtils.mkdirp(this.chunkStorePath); + this.chunkSize = config.getChunkSize() == null ? computeChunkSize(config) : config.getChunkSize(); + if (chunkValidation) { + validateChunkSize(config.getMaxResultsSize(), chunkSize); + } + this.pushStopwatch = Stopwatch.createUnstarted(); + this.pushStopwatch.reset(); + + this.currentChunk = new Chunk(nextChunkId, new File(chunkStorePath, String.valueOf(nextChunkId++))); + } + + private static long computeChunkSize(S3OutputConfig config) + { + return computeMinChunkSize(config.getMaxResultsSize()); + } + + private static void validateChunkSize(long maxResultsSize, long chunkSize) + { + if (computeMinChunkSize(maxResultsSize) > chunkSize) { + throw new IAE( + "chunkSize[%s] is too small for maxResultsSize[%s]. 
chunkSize should be at least [%s]", + chunkSize, + maxResultsSize, + computeMinChunkSize(maxResultsSize) + ); + } + if (S3_MULTIPART_UPLOAD_MAX_PART_SIZE < chunkSize) { + throw new IAE( + "chunkSize[%s] should be smaller than [%s]", + chunkSize, + S3_MULTIPART_UPLOAD_MAX_PART_SIZE + ); + } + } + + private static long computeMinChunkSize(long maxResultsSize) + { + return Math.max( + (long) Math.ceil(maxResultsSize / (double) S3_MULTIPART_UPLOAD_MAX_NUM_PARTS), + S3_MULTIPART_UPLOAD_MIN_PART_SIZE + ); + } + + public static String getS3KeyForQuery(String prefix, String asyncResultId) + { + return JOINER.join( + prefix, + asyncResultId Review Comment: Ahh, this is no longer needed now. ########## extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/output/S3OutputConfig.java: ########## @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.druid.java.util.common.HumanReadableBytes; +import org.apache.druid.java.util.common.HumanReadableBytesRange; +import org.apache.druid.java.util.common.RetryUtils; + +import javax.annotation.Nullable; +import java.io.File; + +public class S3OutputConfig +{ + @JsonProperty + private String bucket; + + @JsonProperty + private String prefix; + + @JsonProperty + private File tempDir; + + @Nullable + @JsonProperty + @HumanReadableBytesRange( + min = RetriableS3OutputStream.S3_MULTIPART_UPLOAD_MIN_PART_SIZE, + max = RetriableS3OutputStream.S3_MULTIPART_UPLOAD_MAX_PART_SIZE + ) // limits of s3 multipart upload + private HumanReadableBytes chunkSize; + + /** + * Max size for each query results. This limit relies on the s3 multipart upload limits. + * See https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html for more details. + * + * @see RetriableS3OutputStream + */ + @JsonProperty + @HumanReadableBytesRange(min = 5L * 1024 * 1024, max = 5L * 1024 * 1024 * 1024 * 1024) + private HumanReadableBytes maxResultsSize = new HumanReadableBytes("100MiB"); + + /** + * Max number of tries for each upload. + */ + @JsonProperty + private int maxTriesOnTransientErrors = RetryUtils.DEFAULT_MAX_TRIES; + + public String getBucket() + { + return bucket; + } + + public String getPrefix() + { + return prefix; + } + + public File getTempDir() + { + return tempDir; + } + + @Nullable + public Long getChunkSize() + { + return chunkSize == null ? null : chunkSize.getBytes(); Review Comment: Moved the logic into this class.
That seemed better ########## extensions-core/s3-extensions/src/test/java/org/apache/druid/storage/s3/output/RetriableS3OutputStreamTest.java: ########## @@ -0,0 +1,382 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.SdkClientException; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import org.apache.druid.java.util.common.IOE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.storage.s3.NoopServerSideEncryption; +import org.apache.druid.storage.s3.ServerSideEncryptingAmazonS3; +import org.easymock.EasyMock; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.TemporaryFolder; + +import javax.annotation.Nullable; +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +public class RetriableS3OutputStreamTest +{ + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Rule + public ExpectedException expectedException = ExpectedException.none(); + + private final TestAmazonS3 s3 = new TestAmazonS3(0); + private final String path = "resultId"; + + + private S3OutputConfig config; + private long maxResultsSize; + private long chunkSize; + + @Before + public void setup() throws IOException + { + final File tempDir = temporaryFolder.newFolder(); + chunkSize = 10L; + config = new S3OutputConfig() + { + @Override + public File getTempDir() + { + return tempDir; + } + + @Override + public Long getChunkSize() + { + return chunkSize; + } + + @Override + public long getMaxResultsSize() + { + return maxResultsSize; + } + + @Override + public int getMaxTriesOnTransientError() + { + return 2; + } + }; + } + + @Test + public void testTooSmallChunkSize() throws IOException + { + maxResultsSize = 100000000000L; + chunkSize = 9000000L; + + expectedException.expect(IllegalArgumentException.class); + expectedException.expectMessage( + "chunkSize[9000000] is too small for maxResultsSize[100000000000]. 
chunkSize should be at least [10000000]" + ); + new RetriableS3OutputStream(config, s3, path).close(); + } + + @Test + public void testTooSmallChunkSizeMaxResultsSizeIsNotRetionalToMaxPartNum() throws IOException + { + maxResultsSize = 274877906944L; + chunkSize = 27487790; + + expectedException.expect(IllegalArgumentException.class); + expectedException.expectMessage( + "chunkSize[27487790] is too small for maxResultsSize[274877906944]. chunkSize should be at least [27487791]" + ); + new RetriableS3OutputStream(config, s3, path).close(); + } + + @Test + public void testTooLargeChunkSize() throws IOException + { + maxResultsSize = 1024L * 1024 * 1024 * 1024; + chunkSize = RetriableS3OutputStream.S3_MULTIPART_UPLOAD_MAX_PART_SIZE + 1; + + expectedException.expect(IllegalArgumentException.class); + expectedException.expectMessage( + "chunkSize[5368709121] should be smaller than [5368709120]" + ); + new RetriableS3OutputStream(config, s3, path).close(); + } + + @Test + public void testWriteAndHappy() throws IOException + { + maxResultsSize = 1000; + chunkSize = 10; + ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES); + try (RetriableS3OutputStream out = new RetriableS3OutputStream( + config, + s3, + path, + false + )) { + for (int i = 0; i < 25; i++) { + bb.clear(); + bb.putInt(i); + out.write(bb.array()); + } + } + // each chunk is 10 bytes, so there should be 10 chunks. + Assert.assertEquals(10, s3.partRequests.size()); + s3.assertCompleted(chunkSize, Integer.BYTES * 25); + } + + @Test + public void testWriteSizeLargerThanConfiguredMaxChunkSizeShouldSucceed() throws IOException + { + maxResultsSize = 1000; + chunkSize = 10; + ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES * 3); + try (RetriableS3OutputStream out = new RetriableS3OutputStream( + config, + s3, + path, + false + )) { + bb.clear(); + bb.putInt(1); + bb.putInt(2); + bb.putInt(3); + out.write(bb.array()); + } + // each chunk 10 bytes, so there should be 2 chunks. + Assert.assertEquals(2, s3.partRequests.size()); + s3.assertCompleted(chunkSize, Integer.BYTES * 3); + } + + @Test + public void testWriteSmallBufferShouldSucceed() throws IOException + { + maxResultsSize = 1000; + chunkSize = 128; + try (RetriableS3OutputStream out = new RetriableS3OutputStream( + config, + s3, + path, + false + )) { + for (int i = 0; i < 600; i++) { + out.write(i); + } + } + // each chunk 128 bytes, so there should be 5 chunks. + Assert.assertEquals(5, s3.partRequests.size()); + s3.assertCompleted(chunkSize, 600); + } + + @Test + public void testHitResultsSizeLimit() throws IOException + { + maxResultsSize = 50; + ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES); + try (RetriableS3OutputStream out = new RetriableS3OutputStream( Review Comment: Aren't we doing that now? This test case specifically checks the chunking logic of the retriable output stream, hence it does not go through the connector. ########## extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/output/RetriableS3OutputStream.java: ########## @@ -0,0 +1,432 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.amazonaws.AmazonServiceException; +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; +import com.amazonaws.services.s3.model.ObjectMetadata; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Joiner; +import com.google.common.base.Stopwatch; +import com.google.common.io.CountingOutputStream; +import it.unimi.dsi.fastutil.io.FastBufferedOutputStream; +import org.apache.druid.java.util.common.FileUtils; +import org.apache.druid.java.util.common.IAE; +import org.apache.druid.java.util.common.IOE; +import org.apache.druid.java.util.common.RetryUtils; +import org.apache.druid.java.util.common.io.Closer; +import org.apache.druid.java.util.common.logger.Logger; +import org.apache.druid.storage.s3.S3Utils; +import org.apache.druid.storage.s3.ServerSideEncryptingAmazonS3; + +import java.io.Closeable; +import java.io.File; +import java.io.FileNotFoundException; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.OutputStream; +import java.util.ArrayList; +import java.util.List; +import java.util.Objects; +import java.util.UUID; +import java.util.concurrent.TimeUnit; + +/** + * A retriable output stream for s3. It works as follows: + * <p> + * 1) When new data is written, it first creates a chunk on local disk. + * 2) New data is written to the local chunk until it is full. + * 3) When the chunk is full, it uploads the chunk to s3 using the multipart upload API. + * Since this happens synchronously, {@link #write(byte[], int, int)} can be blocked until the upload is done. + * The upload can be retried when it fails with transient errors. + * 4) Once the upload succeeds, it creates a new chunk and continues. + * 5) When the stream is closed, it uploads the last chunk and finalizes the multipart upload. + * {@link #close()} can be blocked until the upload is done. + * <p> + * This output stream supports compression formats if they are <i>concatenatable</i>, + * such as ZIP or GZIP. + * <p> + * This class is not thread-safe. + * <p> + * This class can be moved to the s3 extension as a low-level API, + * whereas it currently provides only high-level APIs such as S3DataSegmentPuller. + */ +public class RetriableS3OutputStream extends OutputStream Review Comment: I think for now this is quite tied to s3 due to the way the multipart api works. The decision to refactor the chunking in a way that GCP and Azure can also use can be deferred to a later point in time, when we are implementing those constructs.
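For anyone following the chunk-size discussion in this thread: the validation quoted above boils down to the s3 multipart limits hard-coded in the PR (parts of 5 MiB to 5 GiB, at most 10,000 parts per upload). Below is a minimal standalone sketch of the same arithmetic; the class name ChunkSizeMath is illustrative only, but the formula mirrors computeMinChunkSize from RetriableS3OutputStream:

public class ChunkSizeMath
{
  private static final long MIN_PART_SIZE = 5L * 1024 * 1024;         // 5 MiB, s3 minimum part size
  private static final long MAX_PART_SIZE = 5L * 1024 * 1024 * 1024L; // 5 GiB, s3 maximum part size
  private static final int MAX_NUM_PARTS = 10_000;                    // s3 maximum number of parts per upload

  // A chunk must be large enough that maxResultsSize fits into at most
  // 10,000 parts, and no smaller than the 5 MiB minimum part size.
  static long computeMinChunkSize(long maxResultsSize)
  {
    return Math.max(
        (long) Math.ceil(maxResultsSize / (double) MAX_NUM_PARTS),
        MIN_PART_SIZE
    );
  }

  public static void main(String[] args)
  {
    // Matches the test above: ceil(274877906944 / 10000) = 27487791,
    // so chunkSize = 27487790 is rejected as one byte too small.
    System.out.println(computeMinChunkSize(274877906944L)); // 27487791
    // With the default maxResultsSize of 100MiB, the 5 MiB floor dominates.
    System.out.println(computeMinChunkSize(100L * 1024 * 1024)); // 5242880
  }
}

The upper bound is simpler: chunkSize may not exceed MAX_PART_SIZE (5368709120 bytes), which is exactly the boundary the testTooLargeChunkSize case exercises.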
########## extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/output/S3OutputConfig.java: ########## @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.druid.java.util.common.HumanReadableBytes; +import org.apache.druid.java.util.common.HumanReadableBytesRange; +import org.apache.druid.java.util.common.RetryUtils; + +import javax.annotation.Nullable; +import java.io.File; + +public class S3OutputConfig +{ + @JsonProperty + private String bucket; + + @JsonProperty + private String prefix; + + @JsonProperty + private File tempDir; + + @Nullable + @JsonProperty + @HumanReadableBytesRange( + min = RetriableS3OutputStream.S3_MULTIPART_UPLOAD_MIN_PART_SIZE, + max = RetriableS3OutputStream.S3_MULTIPART_UPLOAD_MAX_PART_SIZE + ) // limits of s3 multipart upload + private HumanReadableBytes chunkSize; Review Comment: This is an optional config. Moreover, the range is controlled by AWS s3; see https://docs.aws.amazon.com/AmazonS3/latest/userguide/qfacts.html. Because the consumers of this config work on top of streams, we do not know the full size of the object up front, hence chunking is required. Also, if the user does not specify it, we calculate it from the maxResultsSize. ########## extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/output/S3StorageConnectorModule.java: ########## @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.fasterxml.jackson.databind.Module; +import com.fasterxml.jackson.databind.module.SimpleModule; +import com.google.inject.Binder; +import org.apache.druid.initialization.DruidModule; + +import java.util.Collections; +import java.util.List; + +public class S3StorageConnectorModule implements DruidModule +{ + @Override + public List<? 
extends Module> getJacksonModules() + { + return Collections.singletonList( + new SimpleModule(this.getClass().getSimpleName()).registerSubtypes(S3StorageConnectorProvider.class) + ); + } + + @Override + public void configure(Binder binder) + { + } Review Comment: So connector would be a namespace based singleton in guice which will have items like configs injected to it. Lifecycle actually only makes sense for local storage which can be tackled as part of a follow up PR. Each connector can have a custom config that has more things injected into it. All config based user inputs like bucket, prefix are injected via Json configuration and rest like s3 client are injected using guice. ########## extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/output/S3OutputConfig.java: ########## @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.druid.java.util.common.HumanReadableBytes; +import org.apache.druid.java.util.common.HumanReadableBytesRange; +import org.apache.druid.java.util.common.RetryUtils; + +import javax.annotation.Nullable; +import java.io.File; + +public class S3OutputConfig Review Comment: Nope, each S3OutputConfig is namespaced by the extension. So we might have multiple output configs per druid process but only one would be activated per extension. ########## processing/src/main/java/org/apache/druid/guice/StartupInjectorBuilder.java: ########## @@ -47,18 +48,15 @@ public StartupInjectorBuilder() new JacksonModule(), new ConfigModule(), new NullHandlingModule(), + new StorageConnectorModule(), Review Comment: Yeah, the CoreInjector seems the correct place. I have added the comment in the StartupInjectorBuilder class. ########## extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/output/S3OutputConfig.java: ########## @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.apache.druid.storage.s3.output; + +import com.fasterxml.jackson.annotation.JsonProperty; +import org.apache.druid.java.util.common.HumanReadableBytes; +import org.apache.druid.java.util.common.HumanReadableBytesRange; +import org.apache.druid.java.util.common.RetryUtils; + +import javax.annotation.Nullable; +import java.io.File; + +public class S3OutputConfig +{ + @JsonProperty + private String bucket; Review Comment: These come from the user as part of the extension config. Checkout the comment in storageConnector interface. ########## extensions-core/s3-extensions/src/test/java/org/apache/druid/storage/s3/output/RetriableS3OutputStreamTest.java: ########## @@ -0,0 +1,382 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.SdkClientException; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import org.apache.druid.java.util.common.IOE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.storage.s3.NoopServerSideEncryption; +import org.apache.druid.storage.s3.ServerSideEncryptingAmazonS3; +import org.easymock.EasyMock; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.TemporaryFolder; + +import javax.annotation.Nullable; +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +public class RetriableS3OutputStreamTest +{ + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Rule + public ExpectedException expectedException = ExpectedException.none(); + + private final TestAmazonS3 s3 = new TestAmazonS3(0); + private final String path = "resultId"; + + + private S3OutputConfig config; + private long maxResultsSize; + private long chunkSize; + + @Before + public void setup() throws IOException + { + final File tempDir = temporaryFolder.newFolder(); + chunkSize = 10L; + config = new S3OutputConfig() + { + @Override + public File getTempDir() + { + return tempDir; + } + + @Override 
+ public Long getChunkSize() + { + return chunkSize; + } + + @Override + public long getMaxResultsSize() + { + return maxResultsSize; + } + + @Override + public int getMaxTriesOnTransientError() + { + return 2; + } + }; + } + + @Test + public void testTooSmallChunkSize() throws IOException + { + maxResultsSize = 100000000000L; Review Comment: Added the underscore representation. ########## extensions-core/s3-extensions/src/test/java/org/apache/druid/storage/s3/output/RetriableS3OutputStreamTest.java: ########## @@ -0,0 +1,382 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.druid.storage.s3.output; + +import com.amazonaws.AmazonClientException; +import com.amazonaws.SdkClientException; +import com.amazonaws.services.s3.AmazonS3; +import com.amazonaws.services.s3.model.AbortMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest; +import com.amazonaws.services.s3.model.CompleteMultipartUploadResult; +import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest; +import com.amazonaws.services.s3.model.InitiateMultipartUploadResult; +import com.amazonaws.services.s3.model.PartETag; +import com.amazonaws.services.s3.model.UploadPartRequest; +import com.amazonaws.services.s3.model.UploadPartResult; +import org.apache.druid.java.util.common.IOE; +import org.apache.druid.java.util.common.StringUtils; +import org.apache.druid.storage.s3.NoopServerSideEncryption; +import org.apache.druid.storage.s3.ServerSideEncryptingAmazonS3; +import org.easymock.EasyMock; +import org.hamcrest.CoreMatchers; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; +import org.junit.rules.TemporaryFolder; + +import javax.annotation.Nullable; +import java.io.File; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.List; +import java.util.stream.Collectors; + +public class RetriableS3OutputStreamTest +{ + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Rule + public ExpectedException expectedException = ExpectedException.none(); + + private final TestAmazonS3 s3 = new TestAmazonS3(0); + private final String path = "resultId"; + + + private S3OutputConfig config; + private long maxResultsSize; + private long chunkSize; + + @Before + public void setup() throws IOException + { + final File tempDir = temporaryFolder.newFolder(); + chunkSize = 10L; + config = new S3OutputConfig() + { + @Override + public File getTempDir() + { + return tempDir; + } + + @Override + public Long getChunkSize() + { + return chunkSize; + } + + @Override + public long getMaxResultsSize() + { + return maxResultsSize; + } + + @Override + public int 
getMaxTriesOnTransientError() + { + return 2; + } + }; + } + + @Test + public void testTooSmallChunkSize() throws IOException + { + maxResultsSize = 100000000000L; + chunkSize = 9000000L; + + expectedException.expect(IllegalArgumentException.class); + expectedException.expectMessage( + "chunkSize[9000000] is too small for maxResultsSize[100000000000]. chunkSize should be at least [10000000]" + ); + new RetriableS3OutputStream(config, s3, path).close(); + } + + @Test + public void testTooSmallChunkSizeMaxResultsSizeIsNotRetionalToMaxPartNum() throws IOException + { + maxResultsSize = 274877906944L; + chunkSize = 27487790; + + expectedException.expect(IllegalArgumentException.class); + expectedException.expectMessage( + "chunkSize[27487790] is too small for maxResultsSize[274877906944]. chunkSize should be at least [27487791]" + ); + new RetriableS3OutputStream(config, s3, path).close(); + } + + @Test + public void testTooLargeChunkSize() throws IOException + { + maxResultsSize = 1024L * 1024 * 1024 * 1024; + chunkSize = RetriableS3OutputStream.S3_MULTIPART_UPLOAD_MAX_PART_SIZE + 1; + + expectedException.expect(IllegalArgumentException.class); + expectedException.expectMessage( + "chunkSize[5368709121] should be smaller than [5368709120]" + ); + new RetriableS3OutputStream(config, s3, path).close(); + } + + @Test + public void testWriteAndHappy() throws IOException + { + maxResultsSize = 1000; + chunkSize = 10; + ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES); + try (RetriableS3OutputStream out = new RetriableS3OutputStream( + config, + s3, + path, + false + )) { + for (int i = 0; i < 25; i++) { + bb.clear(); + bb.putInt(i); + out.write(bb.array()); + } + } + // each chunk is 10 bytes, so there should be 10 chunks. + Assert.assertEquals(10, s3.partRequests.size()); + s3.assertCompleted(chunkSize, Integer.BYTES * 25); + } + + @Test + public void testWriteSizeLargerThanConfiguredMaxChunkSizeShouldSucceed() throws IOException + { + maxResultsSize = 1000; + chunkSize = 10; + ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES * 3); + try (RetriableS3OutputStream out = new RetriableS3OutputStream( + config, + s3, + path, + false + )) { + bb.clear(); + bb.putInt(1); + bb.putInt(2); + bb.putInt(3); + out.write(bb.array()); + } + // each chunk 10 bytes, so there should be 2 chunks. + Assert.assertEquals(2, s3.partRequests.size()); + s3.assertCompleted(chunkSize, Integer.BYTES * 3); + } + + @Test + public void testWriteSmallBufferShouldSucceed() throws IOException + { + maxResultsSize = 1000; + chunkSize = 128; + try (RetriableS3OutputStream out = new RetriableS3OutputStream( + config, + s3, + path, + false + )) { + for (int i = 0; i < 600; i++) { + out.write(i); + } + } + // each chunk 128 bytes, so there should be 5 chunks. 
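+ // (600 single-byte writes = four full 128-byte chunks, plus a final 88-byte chunk that close() uploads as the fifth part.)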
+ Assert.assertEquals(5, s3.partRequests.size()); + s3.assertCompleted(chunkSize, 600); + } + + @Test + public void testHitResultsSizeLimit() throws IOException + { + maxResultsSize = 50; + ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES); + try (RetriableS3OutputStream out = new RetriableS3OutputStream( + config, + s3, + path, + false + )) { + for (int i = 0; i < 14; i++) { + bb.clear(); + bb.putInt(i); + out.write(bb.array()); + } + + Assert.assertThrows( + "Exceeded max results size [50]", + IOException.class, + () -> { + bb.clear(); + bb.putInt(14); + out.write(bb.array()); + } + ); + } + + s3.assertAborted(); + } + + @Test + public void testSuccessToUploadAfterRetry() throws IOException + { + final TestAmazonS3 s3 = new TestAmazonS3(1); + + maxResultsSize = 1000; + chunkSize = 10; + ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES); + try (RetriableS3OutputStream out = new RetriableS3OutputStream( + config, + s3, + path, + false + )) { + for (int i = 0; i < 25; i++) { + bb.clear(); + bb.putInt(i); + out.write(bb.array()); + } + } + // each chunk is 10 bytes, so there should be 10 chunks. + Assert.assertEquals(10, s3.partRequests.size()); + s3.assertCompleted(chunkSize, Integer.BYTES * 25); + } + + @Test + public void testFailToUploadAfterRetries() throws IOException + { + final TestAmazonS3 s3 = new TestAmazonS3(3); + + maxResultsSize = 1000; + ByteBuffer bb = ByteBuffer.allocate(Integer.BYTES); + try (RetriableS3OutputStream out = new RetriableS3OutputStream( + config, + s3, + path, + false + )) { + for (int i = 0; i < 2; i++) { + bb.clear(); + bb.putInt(i); + out.write(bb.array()); + } + + expectedException.expect(RuntimeException.class); + expectedException.expectCause(CoreMatchers.instanceOf(AmazonClientException.class)); + expectedException.expectMessage("Upload failure test. Remaining failures [1]"); + bb.clear(); + bb.putInt(3); + out.write(bb.array()); + } + + s3.assertAborted(); + } + + private static class TestAmazonS3 extends ServerSideEncryptingAmazonS3 + { + private final List<UploadPartRequest> partRequests = new ArrayList<>(); + + private int uploadFailuresLeft; + private boolean aborted = false; + @Nullable + private CompleteMultipartUploadRequest completeRequest; + + private TestAmazonS3(int totalUploadFailures) + { + super(EasyMock.createMock(AmazonS3.class), new NoopServerSideEncryption()); Review Comment: The purpose of this class is to act as a hook so that no calls are getting through to s3. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
