This is an automated email from the ASF dual-hosted git repository.
daim pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/jackrabbit-oak.git
The following commit(s) were added to refs/heads/trunk by this push:
new 50e1040c56 OAK-12062 : fixed NPE while uploading metadata for AWS S3 (#2688)
50e1040c56 is described below
commit 50e1040c5602895b65b4d8e2a882b824e274a103
Author: Rishabh Kumar <[email protected]>
AuthorDate: Wed Jan 21 16:12:04 2026 +0530
OAK-12062 : fixed NPE while uploading metadata for AWS S3 (#2688)
* OAK-12062 : fixed NPE while uploading metadata for AWS S3
* OAK-12062 : removed temp file in finally method
---
.../jackrabbit/oak/blob/cloud/s3/S3Backend.java | 82 +++++++++++-----------
1 file changed, 41 insertions(+), 41 deletions(-)
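Context for readers skimming the diff: the removed code passed a null content length to AsyncRequestBody.fromInputStream in the S3 mode, while the fix determines the length up front for every kind of stream before building the request body. The following is a minimal standalone sketch of that length-probing strategy; the class and helper names are illustrative only (Oak itself uses IOUtils.consume from its own commons utilities), and the temp-file handoff via an out-parameter is a simplification of what S3Backend does inline.

    import java.io.ByteArrayInputStream;
    import java.io.File;
    import java.io.FileInputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.nio.file.Files;

    final class ContentLengthProbe {

        // Determine a stream's length without losing its contents. When the
        // length cannot be recovered any other way, the stream is buffered to
        // a temp file, which the caller must re-open and later delete.
        static long probe(InputStream input, File[] tempFileOut) throws IOException {
            if (input instanceof FileInputStream) {
                // channel size; may not reflect writes made after opening
                return ((FileInputStream) input).getChannel().size();
            } else if (input instanceof ByteArrayInputStream) {
                // available() is exact for in-memory streams
                return input.available();
            } else if (input.markSupported()) {
                // consume the stream to count its bytes, then rewind
                input.mark(Integer.MAX_VALUE);
                long length = countBytes(input);
                input.reset();
                return length;
            } else {
                // fallback: copy everything to a temp file and measure it
                File tempFile = File.createTempFile("s3backend-", ".tmp");
                try (OutputStream out = Files.newOutputStream(tempFile.toPath())) {
                    input.transferTo(out);
                }
                tempFileOut[0] = tempFile;
                return tempFile.length();
            }
        }

        private static long countBytes(InputStream in) throws IOException {
            byte[] buf = new byte[8192];
            long total = 0;
            for (int n; (n = in.read(buf)) != -1; ) {
                total += n;
            }
            return total;
        }
    }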
diff --git a/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java b/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java
index 9087e6a109..d79297f11b 100644
--- a/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java
+++ b/oak-blob-cloud/src/main/java/org/apache/jackrabbit/oak/blob/cloud/s3/S3Backend.java
@@ -19,13 +19,13 @@ package org.apache.jackrabbit.oak.blob.cloud.s3;
import java.io.ByteArrayInputStream;
import java.io.File;
import java.io.FileInputStream;
-import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
+import java.nio.file.Files;
import java.time.Duration;
import java.time.Instant;
import java.util.ArrayList;
@@ -514,6 +514,7 @@ public class S3Backend extends AbstractSharedBackend {
// Executor required to handle reading from the InputStream on a separate thread so the main upload is not blocked.
final ExecutorService executor = Executors.newSingleThreadExecutor();
+ File tempFile = null;
try {
Thread.currentThread().setContextClassLoader(getClass().getClassLoader());
final PutObjectRequest.Builder builder = PutObjectRequest.builder()
@@ -521,8 +522,33 @@ public class S3Backend extends AbstractSharedBackend {
.contentType("application/octet-stream")
.key(addMetaKeyPrefix(name));
+ InputStream uploadStream = input;
+ final long length;
+
+ if (input instanceof FileInputStream) {
+ // if the file is modified after opening, the size may not reflect the latest changes
+ FileInputStream fis = (FileInputStream) input;
+ length = fis.getChannel().size();
+ } else if (input instanceof ByteArrayInputStream) {
+ length = input.available();
+ } else if (input.markSupported()) {
+ // in case the inputStream supports mark & reset
+ input.mark(Integer.MAX_VALUE);
+ length = IOUtils.consume(input);
+ input.reset();
+ } else {
+ // we have to read the whole stream to get the actual length
+ // fallback: buffer the stream to a temp file and re-read it
+ tempFile = File.createTempFile("s3backend-", ".tmp");
+ try (OutputStream out = Files.newOutputStream(tempFile.toPath())) {
+ IOUtils.copy(input, out);
+ }
+ length = tempFile.length();
+ uploadStream = Files.newInputStream(tempFile.toPath());
+ }
+
// Specify `null` for the content length when you don't know the content length.
- final AsyncRequestBody body = getRequestBody(input, executor, builder);
+ final AsyncRequestBody body = getRequestBody(uploadStream, length, executor, builder);
final Upload upload = tmx.upload(uploadReq ->
uploadReq.requestBody(body).
putObjectRequest(
@@ -530,12 +556,19 @@ public class S3Backend extends AbstractSharedBackend {
.build());
upload.completionFuture().join();
} catch (Exception e) {
- LOG.error("Exception in uploading {}", e.getMessage());
+ LOG.error("Exception in uploading metadata file", e);
throw new DataStoreException("Error in uploading metadata file", e);
} finally {
if (contextClassLoader != null) {
Thread.currentThread().setContextClassLoader(contextClassLoader);
}
+ if (tempFile != null) {
+ try {
+ Files.deleteIfExists(tempFile.toPath());
+ } catch (IOException e) {
+ LOG.warn("Failed to delete temp file {}", tempFile, e);
+ }
+ }
executor.shutdown();
try {
if (!executor.awaitTermination(60, TimeUnit.SECONDS)) {
@@ -1332,44 +1365,11 @@ public class S3Backend extends AbstractSharedBackend {
}
@NotNull
- private AsyncRequestBody getRequestBody(final InputStream input, final ExecutorService executor,
- final PutObjectRequest.Builder builder) throws IOException {
- final AsyncRequestBody body;
- if (Objects.equals(RemoteStorageMode.S3, properties.get(S3Constants.MODE))) {
- body = AsyncRequestBody.fromInputStream(input, null, executor);
- } else {
- // for GCP we need to know the length in advance, else it won't work.
- final long length;
- if (input instanceof FileInputStream) {
- final FileInputStream fis = (FileInputStream) input;
- // if the file is modified after opening, the size may not reflect the latest changes
- length = fis.getChannel().size();
- body = AsyncRequestBody.fromInputStream(input, length, executor);
- } else if (input instanceof ByteArrayInputStream) {
- length = input.available();
- body = AsyncRequestBody.fromInputStream(input, length, executor);
- } else if (input.markSupported()) {
- // in case the inputStream supports mark & reset
- input.mark(Integer.MAX_VALUE);
- length = IOUtils.consume(input);
- input.reset();
- body = AsyncRequestBody.fromInputStream(input, length, executor);
- } else {
- // we have to read all the stream to get the actual length
- // last else block: store to temp file and re-read
- final File tempFile = File.createTempFile("inputstream-", ".tmp");
- tempFile.deleteOnExit(); // Clean up after JVM exits
-
- try (OutputStream out = new FileOutputStream(tempFile)) {
- IOUtils.copy(input, out); // Copy all bytes to file
- }
- // Get length from file
- length = tempFile.length();
- // Re-create InputStream from temp file
- body = AsyncRequestBody.fromInputStream(new FileInputStream(tempFile), length, executor);
- }
- builder.contentLength(length);
- }
+ private AsyncRequestBody getRequestBody(final InputStream input, final long length, final ExecutorService executor,
+ final PutObjectRequest.Builder builder) {
+ // for both AWS and GCP we need to know the length in advance, else it won't work.
+ AsyncRequestBody body = AsyncRequestBody.fromInputStream(input, length, executor);
+ builder.contentLength(length);
return body;
}
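With the refactor, getRequestBody always receives a known length, so AsyncRequestBody.fromInputStream is never handed null. A minimal standalone usage sketch of that call, assuming the AWS SDK for Java v2 (software.amazon.awssdk) on the classpath; the bucket, key, and file path below are placeholders, not values from the patch:

    import java.io.InputStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;
    import java.util.concurrent.TimeUnit;

    import software.amazon.awssdk.core.async.AsyncRequestBody;
    import software.amazon.awssdk.services.s3.model.PutObjectRequest;

    public class MetadataUploadSketch {
        public static void main(String[] args) throws Exception {
            Path file = Path.of("metadata.bin");   // placeholder metadata file
            long length = Files.size(file);        // length known before the upload starts
            ExecutorService executor = Executors.newSingleThreadExecutor();
            try (InputStream in = Files.newInputStream(file)) {
                // An explicit length (rather than null) lets the SDK set
                // Content-Length up front, mirroring what the patched code does.
                AsyncRequestBody body = AsyncRequestBody.fromInputStream(in, length, executor);
                PutObjectRequest request = PutObjectRequest.builder()
                        .bucket("example-bucket")  // placeholder
                        .key("META/metadata.bin")  // placeholder
                        .contentLength(length)
                        .contentType("application/octet-stream")
                        .build();
                // In real code, hand body + request to S3TransferManager.upload(...)
                // and join its completion future before this try block closes the stream.
            } finally {
                executor.shutdown();
                executor.awaitTermination(60, TimeUnit.SECONDS);
            }
        }
    }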