Github user dongjoon-hyun commented on a diff in the pull request:

    https://github.com/apache/spark/pull/21206#discussion_r185293327
  
    --- Diff: 
sql/core/src/main/java/org/apache/spark/sql/execution/vectorized/WritableColumnVector.java
 ---
    @@ -92,17 +92,22 @@ public void reserve(int requiredCapacity) {
           } else {
             throwUnsupportedException(requiredCapacity, null);
           }
    +    } else if (requiredCapacity < 0) {
    +      throwUnsupportedException(requiredCapacity, null);
         }
       }
     
       private void throwUnsupportedException(int requiredCapacity, Throwable 
cause) {
    -    String message = "Cannot reserve additional contiguous bytes in the 
vectorized reader " +
    -        "(requested = " + requiredCapacity + " bytes). As a workaround, 
you can disable the " +
    -        "vectorized reader, or increase the vectorized reader batch size. 
For parquet file " +
    -        "format, refer to " + 
SQLConf.PARQUET_VECTORIZED_READER_ENABLED().key() + " and " +
    -        SQLConf.PARQUET_VECTORIZED_READER_BATCH_SIZE().key() + "; for orc 
file format, refer to " +
    -        SQLConf.ORC_VECTORIZED_READER_ENABLED().key() + " and " +
    -        SQLConf.ORC_VECTORIZED_READER_BATCH_SIZE().key() + ".";
    +    String message = "Cannot reserve additional contiguous bytes in the 
vectorized reader (" +
    +        (requiredCapacity >= 0 ?"requested " + requiredCapacity + " bytes" 
: "integer overflow") +
    --- End diff --
    
    `?"requested "` -> `? "requested "` (nit: add a space after the ternary `?` operator for readability)


---

---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to