This is an automated email from the ASF dual-hosted git repository.

slfan1989 pushed a commit to branch branch-3.2
in repository https://gitbox.apache.org/repos/asf/hadoop.git


The following commit(s) were added to refs/heads/branch-3.2 by this push:
     new 45b7a025fc6 MAPREDUCE-7446. Fix NegativeArraySizeException in IFile's readRawValue method (#5918)
45b7a025fc6 is described below

commit 45b7a025fc67759effd6093bbf29028c3572dd1c
Author: Peter Szucs <116345192+p-sz...@users.noreply.github.com>
AuthorDate: Sat Aug 5 11:39:18 2023 +0200

    MAPREDUCE-7446. Fix NegativeArraySizeException in IFile's readRawValue method (#5918)
    
    Co-authored-by: slfan1989 <slfan1...@apache.org>
---
 .../src/main/java/org/apache/hadoop/mapred/IFile.java       | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)

diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
index 1a917e15070..d4117665b6a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/IFile.java
@@ -58,6 +58,7 @@ import org.slf4j.LoggerFactory;
 public class IFile {
   private static final Logger LOG = LoggerFactory.getLogger(IFile.class);
   public static final int EOF_MARKER = -1; // End of File Marker
+  private static final int ARRAY_MAX_SIZE = Integer.MAX_VALUE - 8;
   
   /**
    * <code>IFile.Writer</code> to write out intermediate map-outputs. 
@@ -152,7 +153,7 @@ public class IFile {
       // Write EOF_MARKER for key/value length
       WritableUtils.writeVInt(out, EOF_MARKER);
       WritableUtils.writeVInt(out, EOF_MARKER);
-      decompressedBytesWritten += 2 * WritableUtils.getVIntSize(EOF_MARKER);
+      decompressedBytesWritten += (long) 2 * WritableUtils.getVIntSize(EOF_MARKER);
       
       //Flush the stream
       out.flush();
@@ -219,7 +220,7 @@ public class IFile {
       buffer.reset();
       
       // Update bytes written
-      decompressedBytesWritten += keyLength + valueLength + 
+      decompressedBytesWritten += (long) keyLength + valueLength +
                                   WritableUtils.getVIntSize(keyLength) + 
                                   WritableUtils.getVIntSize(valueLength);
       ++numRecordsWritten;
@@ -245,7 +246,7 @@ public class IFile {
       out.write(value.getData(), value.getPosition(), valueLength); 
 
       // Update bytes written
-      decompressedBytesWritten += keyLength + valueLength + 
+      decompressedBytesWritten += (long) keyLength + valueLength +
                       WritableUtils.getVIntSize(keyLength) + 
                       WritableUtils.getVIntSize(valueLength);
       ++numRecordsWritten;
@@ -394,7 +395,7 @@ public class IFile {
       // Read key and value lengths
       currentKeyLength = WritableUtils.readVInt(dIn);
       currentValueLength = WritableUtils.readVInt(dIn);
-      bytesRead += WritableUtils.getVIntSize(currentKeyLength) +
+      bytesRead += (long) WritableUtils.getVIntSize(currentKeyLength) +
                    WritableUtils.getVIntSize(currentValueLength);
       
       // Check for EOF
@@ -433,8 +434,10 @@ public class IFile {
     }
     
     public void nextRawValue(DataInputBuffer value) throws IOException {
+      final int targetSize = currentValueLength << 1;
+
       final byte[] valBytes = (value.getData().length < currentValueLength)
-        ? new byte[currentValueLength << 1]
+        ? new byte[targetSize < 0 ? ARRAY_MAX_SIZE : targetSize]
         : value.getData();
       int i = readData(valBytes, 0, currentValueLength);
       if (i != currentValueLength) {


---------------------------------------------------------------------
To unsubscribe, e-mail: common-commits-unsubscr...@hadoop.apache.org
For additional commands, e-mail: common-commits-h...@hadoop.apache.org

Reply via email to