This is an automated email from the ASF dual-hosted git repository.

luoc pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/drill.git


The following commit(s) were added to refs/heads/master by this push:
     new 1b62c8de1f DRILL-8200: Update Hadoop libs to ≥ 3.2.3 for CVE-2022-26612 (#2525)
1b62c8de1f is described below

commit 1b62c8de1f6d40015601f0fa7286e38d5cd84cf1
Author: James Turton <[email protected]>
AuthorDate: Sat Apr 30 11:21:40 2022 +0200

    DRILL-8200: Update Hadoop libs to ≥ 3.2.3 for CVE-2022-26612 (#2525)
    
    * Remove pointless Buffer casts.

    Compiling Drill with JDK > 8 still produces ByteBuffer <-> Buffer cast
    exceptions at runtime on JDK 8, even though maven.target.version is set
    to 8. Setting maven.compiler.release to 8 would fix the Buffer casts,
    but under JDK 8 it raises a "package sun.security.jgss does not exist"
    compilation error. A few handwritten casts were added to avoid the
    Buffer casting issue, but because many other call sites remain
    uncovered, the few reverted in this commit achieve nothing (a sketch of
    the underlying JDK issue follows the commit message below).
    
    * Update Hadoop to 3.2.3.
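
For readers unfamiliar with the Buffer casts described above, here is a
minimal, hypothetical sketch of the JDK 8/9 binary-compatibility issue they
worked around (the class name and values are illustrative, not taken from
the Drill sources):

import java.nio.Buffer;
import java.nio.ByteBuffer;

public class BufferCastDemo {
  public static void main(String[] args) {
    ByteBuffer buf = ByteBuffer.allocate(16);

    // Java 9 added covariant overrides such as ByteBuffer.position(int).
    // Compiled on JDK 9+ without --release 8, the call below is recorded
    // in bytecode as ByteBuffer.position(int)ByteBuffer and therefore
    // throws NoSuchMethodError when the class file runs on a JDK 8 JVM.
    buf.position(4);

    // The handwritten workaround being reverted: casting to Buffer binds
    // the call to Buffer.position(int), which JDK 8 also provides.
    ByteBuffer bb = (ByteBuffer) ((Buffer) buf).position(4);
    System.out.println(bb.position()); // prints 4
  }
}

Building with maven.compiler.release set to 8 would compile against the
JDK 8 API and make such casts redundant, but, as the commit message notes,
that currently fails on the sun.security.jgss dependency.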
---
 contrib/storage-phoenix/README.md                  |   2 +-
 .../src/main/resources/winutils/hadoop.dll         | Bin 96256 -> 88576 bytes
 .../src/main/resources/winutils/winutils.exe       | Bin 118784 -> 118784 bytes
 docs/dev/HadoopWinutils.md                         |   2 +-
 .../parquet/hadoop/ColumnChunkIncReadStore.java    |   7 +++----
 5 files changed, 5 insertions(+), 6 deletions(-)

diff --git a/contrib/storage-phoenix/README.md b/contrib/storage-phoenix/README.md
index 01278b5a20..fa8467fe04 100644
--- a/contrib/storage-phoenix/README.md
+++ b/contrib/storage-phoenix/README.md
@@ -103,7 +103,7 @@ requires a recompilation of HBase because of incompatible changes between Hadoop
 
  1. Download HBase 2.4.2 sources and rebuild with Hadoop 3.
 
-    ```mvn clean install -DskipTests -Dhadoop.profile=3.0 -Dhadoop-three.version=3.2.2```
+    ```mvn clean install -DskipTests -Dhadoop.profile=3.0 -Dhadoop-three.version=3.2.3```
 
  2. Remove the `Ignore` annotation in `PhoenixTestSuite.java`.
     
diff --git a/distribution/src/main/resources/winutils/hadoop.dll b/distribution/src/main/resources/winutils/hadoop.dll
index 441d3edd7d..763c40acc4 100644
Binary files a/distribution/src/main/resources/winutils/hadoop.dll and b/distribution/src/main/resources/winutils/hadoop.dll differ
diff --git a/distribution/src/main/resources/winutils/winutils.exe b/distribution/src/main/resources/winutils/winutils.exe
index 75be699559..b2c4819bf7 100644
Binary files a/distribution/src/main/resources/winutils/winutils.exe and b/distribution/src/main/resources/winutils/winutils.exe differ
diff --git a/docs/dev/HadoopWinutils.md b/docs/dev/HadoopWinutils.md
index a9ead275ad..aab93fa06e 100644
--- a/docs/dev/HadoopWinutils.md
+++ b/docs/dev/HadoopWinutils.md
@@ -3,7 +3,7 @@
 Hadoop Winutils native libraries are required to run Drill on Windows. The last version present in maven repository is 2.7.1 and is not updated anymore.
 That's why Winutils version matching Hadoop version used in Drill is located in distribution/src/main/resources.
 
-Current Winutils version: *3.2.2.*
+Current Winutils version: *3.2.3.*
 
 ## References
 - Official wiki: [Windows Problems](https://cwiki.apache.org/confluence/display/HADOOP2/WindowsProblems).
diff --git a/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ColumnChunkIncReadStore.java b/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ColumnChunkIncReadStore.java
index 3ad0a7ac97..773a861213 100644
--- a/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ColumnChunkIncReadStore.java
+++ b/exec/java-exec/src/main/java/org/apache/parquet/hadoop/ColumnChunkIncReadStore.java
@@ -18,7 +18,6 @@
 package org.apache.parquet.hadoop;
 
 import java.io.IOException;
-import java.nio.Buffer;
 import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -216,20 +215,20 @@ public class ColumnChunkIncReadStore implements PageReadStore {
              // Note that the repetition and definition levels are stored uncompressed in
               // the v2 page format.
               int pageBufOffset = 0;
-              ByteBuffer bb = (ByteBuffer) ((Buffer)pageBuf).position(pageBufOffset);
+              ByteBuffer bb = (ByteBuffer) pageBuf.position(pageBufOffset);
               BytesInput repLevelBytes = BytesInput.from(
                 (ByteBuffer) bb.slice().limit(pageBufOffset + repLevelSize)
               );
               pageBufOffset += repLevelSize;
 
-              bb = (ByteBuffer) ((Buffer)pageBuf).position(pageBufOffset);
+              bb = (ByteBuffer) pageBuf.position(pageBufOffset);
               final BytesInput defLevelBytes = BytesInput.from(
                 (ByteBuffer) bb.slice().limit(pageBufOffset + defLevelSize)
               );
               pageBufOffset += defLevelSize;
 
               // we've now reached the beginning of compressed column data
-              bb = (ByteBuffer) ((Buffer)pageBuf).position(pageBufOffset);
+              bb = (ByteBuffer) pageBuf.position(pageBufOffset);
               final BytesInput colDataBytes = decompressor.decompress(
                 BytesInput.from((ByteBuffer) bb.slice()),
                 pageSize - repLevelSize - defLevelSize
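
For context, the hunk above carves a single v2 page buffer into three
consecutive views (repetition levels, definition levels, compressed column
data) using the position()/slice()/limit() idiom. Below is a simplified,
hypothetical sketch of that idiom with made-up offsets, not Drill's actual
code; note that these same position()/limit() calls are the ones that need
JDK 8-compatible compilation, per the commit message:

import java.nio.ByteBuffer;

// Hypothetical helper: expose 'length' bytes starting at 'offset' as an
// independent zero-based view of 'pageBuf', without copying any data.
public class PageSlicer {
  static ByteBuffer sliceAt(ByteBuffer pageBuf, int offset, int length) {
    ByteBuffer bb = pageBuf.duplicate(); // keep the caller's position intact
    bb.position(offset);                 // jump to the start of the region
    ByteBuffer view = bb.slice();        // view starts, zero-based, at 'offset'
    view.limit(length);                  // cap the view at 'length' bytes
    return view;
  }

  public static void main(String[] args) {
    ByteBuffer page = ByteBuffer.wrap(new byte[] {1, 2, 3, 4, 5, 6});
    ByteBuffer repLevels = sliceAt(page, 0, 2); // bytes 0-1
    ByteBuffer defLevels = sliceAt(page, 2, 2); // bytes 2-3
    ByteBuffer colData   = sliceAt(page, 4, 2); // bytes 4-5
    System.out.println(repLevels.remaining()); // prints 2
    System.out.println(defLevels.remaining()); // prints 2
    System.out.println(colData.remaining());   // prints 2
  }
}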
