This is an automated email from the ASF dual-hosted git repository. jsedding pushed a commit to branch jsedding/OAK-12096-memory-usage-regression in repository https://gitbox.apache.org/repos/asf/jackrabbit-oak.git
commit a332fde5f3abb4ba8b38c09e5082d16f380ac1b9 Author: Julian Sedding <[email protected]> AuthorDate: Fri Feb 13 13:12:50 2026 +0100 OAK-12094 - segment-azure: increased heap usage due to OAK-12040 --- .../jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java index 5daeb3586f..9f28315bc9 100644 --- a/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java +++ b/oak-segment-azure/src/main/java/org/apache/jackrabbit/oak/segment/azure/AzureSegmentArchiveWriter.java @@ -34,6 +34,7 @@ import org.apache.jackrabbit.oak.segment.spi.monitor.IOMonitor; import java.io.ByteArrayInputStream; import java.io.File; import java.io.IOException; +import java.util.Arrays; import java.util.concurrent.TimeUnit; import static org.apache.jackrabbit.oak.segment.azure.AzureUtilities.readBufferFully; @@ -82,8 +83,9 @@ public class AzureSegmentArchiveWriter extends AbstractRemoteSegmentArchiveWrite // Upload the binary data and set its metadata using a single HTTP call, // overwriting an existing blob if necessary. Wrapping the byte array in a // ByteArrayInputStream avoids creating a copy of the data range. - BinaryData binaryData = BinaryData.fromStream(new ByteArrayInputStream(data, offset, size), (long) size); - BlockBlobSimpleUploadOptions options = new BlockBlobSimpleUploadOptions(binaryData) + // Note: OAK-12094 describes an interesting regression in heap usage; read it + // for context before making changes to the lines below.
+ BlockBlobSimpleUploadOptions options = new BlockBlobSimpleUploadOptions(new ByteArrayInputStream(data, offset, size), size) .setMetadata(AzureBlobMetadata.toSegmentMetadata(indexEntry)); blob.uploadWithResponse(options, null, Context.NONE); } catch (BlobStorageException e) {
