cryptoe commented on code in PR #12874:
URL: https://github.com/apache/druid/pull/12874#discussion_r942532676
########## extensions-core/s3-extensions/src/main/java/org/apache/druid/storage/s3/output/RetriableS3OutputStream.java:
##########
@@ -0,0 +1,432 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.druid.storage.s3.output;
+
+import com.amazonaws.AmazonServiceException;
+import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
+import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
+import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
+import com.amazonaws.services.s3.model.InitiateMultipartUploadResult;
+import com.amazonaws.services.s3.model.ObjectMetadata;
+import com.amazonaws.services.s3.model.PartETag;
+import com.amazonaws.services.s3.model.UploadPartRequest;
+import com.amazonaws.services.s3.model.UploadPartResult;
+import com.google.common.annotations.VisibleForTesting;
+import com.google.common.base.Joiner;
+import com.google.common.base.Stopwatch;
+import com.google.common.io.CountingOutputStream;
+import it.unimi.dsi.fastutil.io.FastBufferedOutputStream;
+import org.apache.druid.java.util.common.FileUtils;
+import org.apache.druid.java.util.common.IAE;
+import org.apache.druid.java.util.common.IOE;
+import org.apache.druid.java.util.common.RetryUtils;
+import org.apache.druid.java.util.common.io.Closer;
+import org.apache.druid.java.util.common.logger.Logger;
+import org.apache.druid.storage.s3.S3Utils;
+import org.apache.druid.storage.s3.ServerSideEncryptingAmazonS3;
+
+import java.io.Closeable;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Objects;
+import java.util.UUID;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A retriable output stream for s3. It works as follows:
+ * <p>
+ * 1) When new data is written, it first creates a chunk on local disk.
+ * 2) New data is written to the local chunk until it is full.
+ * 3) When the chunk is full, it uploads the chunk to s3 using the multipart upload API.
+ * Since this happens synchronously, {@link #write(byte[], int, int)} can block until the upload is done.
+ * The upload can be retried when it fails with transient errors.
+ * 4) Once the upload succeeds, it creates a new chunk and continues.
+ * 5) When the stream is closed, it uploads the last chunk and finalizes the multipart upload.
+ * {@link #close()} can block until the upload is done.
+ * <p>
+ * For compression format support, this output stream supports compression formats if they are <i>concatenatable</i>,
+ * such as ZIP or GZIP.
+ * <p>
+ * This class is not thread-safe.
+ * <p>
+ * This class can be moved to the s3 extension as a low-level API,
+ * whereas it currently provides only high-level APIs such as S3DataSegmentPuller.
+ */
+public class RetriableS3OutputStream extends OutputStream
+{
+  public static final long S3_MULTIPART_UPLOAD_MIN_PART_SIZE = 5L * 1024 * 1024;
+  public static final long S3_MULTIPART_UPLOAD_MAX_PART_SIZE = 5L * 1024 * 1024 * 1024L;
+
+  private static final Logger LOG = new Logger(RetriableS3OutputStream.class);
+  private static final Joiner JOINER = Joiner.on("/").skipNulls();
+  private static final int S3_MULTIPART_UPLOAD_MAX_NUM_PARTS = 10_000;
+
+  private final S3OutputConfig config;
+  private final ServerSideEncryptingAmazonS3 s3;
+  private final String s3Key;
+  private final String uploadId;
+  private final File chunkStorePath;
+  private final long chunkSize;
+
+  private final List<PartETag> pushResults = new ArrayList<>();
+  private final byte[] singularBuffer = new byte[1];
+
+  // metric
+  private final Stopwatch pushStopwatch;
+
+  private Chunk currentChunk;
+  private int nextChunkId = 1; // multipart upload requires partNumber to be in the range between 1 and 10000
+  private int numChunksPushed;
+  /**
+   * Total size of all chunks. This size is updated whenever a chunk is ready for push,
+   * not when {@link #write(byte[], int, int)} is called, because it would be hard to know
+   * how much the chunk size increases in write() when the chunk is compressed.

Review Comment:
   The initial idea was to use a GZIPInputStream for the chunks, but we dropped that idea in the implementation. Updated the comment.
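For reviewers tracing the five javadoc steps above: they map onto the standard S3 multipart-upload sequence (initiate, one uploadPart per full local chunk, complete on close, abort on failure). Below is a minimal sketch of that sequence against the plain AWS SDK v1 AmazonS3 client, not the class under review: it skips Druid's ServerSideEncryptingAmazonS3 wrapper and RetryUtils wiring, and the fixed three-attempt retry helper is a hypothetical stand-in for the real retry policy.

import com.amazonaws.AmazonServiceException;
import com.amazonaws.services.s3.AmazonS3;
import com.amazonaws.services.s3.model.AbortMultipartUploadRequest;
import com.amazonaws.services.s3.model.CompleteMultipartUploadRequest;
import com.amazonaws.services.s3.model.InitiateMultipartUploadRequest;
import com.amazonaws.services.s3.model.PartETag;
import com.amazonaws.services.s3.model.UploadPartRequest;
import com.amazonaws.services.s3.model.UploadPartResult;

import java.io.File;
import java.util.ArrayList;
import java.util.List;

public class MultipartUploadSketch
{
  // Uploads pre-chunked local files as a single S3 object. Every part except
  // the last must be at least 5 MiB and part numbers must stay within
  // [1, 10000], which is what the class's MIN/MAX part-size constants and
  // S3_MULTIPART_UPLOAD_MAX_NUM_PARTS enforce.
  public static void upload(AmazonS3 s3, String bucket, String key, List<File> chunks)
  {
    // Step 1: initiate the upload; all parts are tied to this uploadId.
    final String uploadId =
        s3.initiateMultipartUpload(new InitiateMultipartUploadRequest(bucket, key)).getUploadId();
    final List<PartETag> partETags = new ArrayList<>();
    try {
      int partNumber = 1;
      for (File chunk : chunks) {
        // Steps 2-4: each full local chunk is pushed synchronously as one part.
        UploadPartRequest req = new UploadPartRequest()
            .withBucketName(bucket)
            .withKey(key)
            .withUploadId(uploadId)
            .withPartNumber(partNumber++)
            .withFile(chunk)
            .withPartSize(chunk.length());
        partETags.add(uploadWithRetry(s3, req).getPartETag());
      }
      // Step 5: completing the upload stitches the parts into one object.
      s3.completeMultipartUpload(new CompleteMultipartUploadRequest(bucket, key, uploadId, partETags));
    }
    catch (RuntimeException e) {
      // Abort on failure so S3 does not keep charging for orphaned parts.
      s3.abortMultipartUpload(new AbortMultipartUploadRequest(bucket, key, uploadId));
      throw e;
    }
  }

  private static UploadPartResult uploadWithRetry(AmazonS3 s3, UploadPartRequest req)
  {
    // Hypothetical retry loop for illustration only; a real implementation
    // should retry just transient errors, with backoff.
    AmazonServiceException last = null;
    for (int attempt = 0; attempt < 3; attempt++) {
      try {
        return s3.uploadPart(req);
      }
      catch (AmazonServiceException e) {
        last = e;
      }
    }
    throw last;
  }
}

On the "concatenatable" remark in the javadoc: gzip members written back to back form a valid gzip stream, which is why compressing each chunk independently still yields one decompressible object. A quick JDK-only check of that property:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class GzipConcatCheck
{
  public static void main(String[] args) throws Exception
  {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    // Two independently compressed "chunks", written back to back.
    try (GZIPOutputStream gz = new GZIPOutputStream(bos)) {
      gz.write("hello ".getBytes(StandardCharsets.UTF_8));
    }
    try (GZIPOutputStream gz = new GZIPOutputStream(bos)) {
      gz.write("world".getBytes(StandardCharsets.UTF_8));
    }
    // GZIPInputStream reads multi-member streams, so both chunks come back.
    try (GZIPInputStream in = new GZIPInputStream(new ByteArrayInputStream(bos.toByteArray()))) {
      System.out.println(new String(in.readAllBytes(), StandardCharsets.UTF_8)); // hello world
    }
  }
}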
-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
