This is an automated email from the ASF dual-hosted git repository.

dongjoon pushed a commit to branch branch-1.9
in repository https://gitbox.apache.org/repos/asf/orc.git


The following commit(s) were added to refs/heads/branch-1.9 by this push:
     new ec5f53a6e ORC-1505: Upgrade Spark to 3.5.0
ec5f53a6e is described below

commit ec5f53a6efdebbe99b7a3bee95fe6184811e865c
Author: Dongjoon Hyun <[email protected]>
AuthorDate: Tue Sep 19 16:38:04 2023 -0700

    ORC-1505: Upgrade Spark to 3.5.0
    
    ### What changes were proposed in this pull request?
    
    This PR aims to upgrade the benchmark module to use Spark 3.5.0 and its dependencies.
    
    ### Why are the changes needed?
    
    Since Apache ORC 1.9.x is maintained for Apache Spark 3.5.0, we had better have test coverage for Apache Spark 3.5.0.
    
    ### How was this patch tested?
    
    Pass the CIs.
    
    Closes #1618 from dongjoon-hyun/ORC-1505.
    
    Authored-by: Dongjoon Hyun <[email protected]>
    Signed-off-by: Dongjoon Hyun <[email protected]>
    (cherry picked from commit f8edba8875f0c2b87974d435ac45a6323172244b)
    Signed-off-by: Dongjoon Hyun <[email protected]>
---
 java/bench/pom.xml                                               | 6 +++---
 .../src/java/org/apache/orc/bench/spark/SparkBenchmark.java      | 9 ++++++---
 2 files changed, 9 insertions(+), 6 deletions(-)

diff --git a/java/bench/pom.xml b/java/bench/pom.xml
index 25a8192cd..a310df100 100644
--- a/java/bench/pom.xml
+++ b/java/bench/pom.xml
@@ -40,7 +40,7 @@
     <junit.version>5.9.3</junit.version>
     <orc.version>${project.version}</orc.version>
     <parquet.version>1.13.1</parquet.version>
-    <spark.version>3.4.1</spark.version>
+    <spark.version>3.5.0</spark.version>
   </properties>
 
   <dependencyManagement>
@@ -91,7 +91,7 @@
       <dependency>
         <groupId>io.netty</groupId>
         <artifactId>netty-all</artifactId>
-        <version>4.1.86.Final</version>
+        <version>4.1.96.Final</version>
         <scope>runtime</scope>
       </dependency>
       <dependency>
@@ -423,7 +423,7 @@
       <dependency>
         <groupId>org.scala-lang</groupId>
         <artifactId>scala-library</artifactId>
-        <version>2.12.15</version>
+        <version>2.12.18</version>
       </dependency>
       <dependency>
         <groupId>org.slf4j</groupId>
diff --git a/java/bench/spark/src/java/org/apache/orc/bench/spark/SparkBenchmark.java b/java/bench/spark/src/java/org/apache/orc/bench/spark/SparkBenchmark.java
index 4991c030c..dc1dcf6f1 100644
--- a/java/bench/spark/src/java/org/apache/orc/bench/spark/SparkBenchmark.java
+++ b/java/bench/spark/src/java/org/apache/orc/bench/spark/SparkBenchmark.java
@@ -202,7 +202,8 @@ public class SparkBenchmark implements OrcBenchmark {
            JavaConverters.collectionAsScalaIterableConverter(filters).asScala().toSeq(),
            scalaMap, source.conf);
     PartitionedFile file = new PartitionedFile(InternalRow.empty(),
-        SparkPath.fromPath(source.path), 0, Long.MAX_VALUE, new String[0], 0L, 0L);
+        SparkPath.fromPath(source.path), 0, Long.MAX_VALUE, new String[0], 0L, 0L,
+        Map$.MODULE$.empty());
     processReader(factory.apply(file), statistics, counters, blackhole);
   }
 
@@ -250,7 +251,8 @@ public class SparkBenchmark implements OrcBenchmark {
            JavaConverters.collectionAsScalaIterableConverter(filters).asScala().toSeq(),
            scalaMap, source.conf);
     PartitionedFile file = new PartitionedFile(InternalRow.empty(),
-        SparkPath.fromPath(source.path), 0, Long.MAX_VALUE, new String[0], 0L, 0L);
+        SparkPath.fromPath(source.path), 0, Long.MAX_VALUE, new String[0], 0L, 0L,
+        Map$.MODULE$.empty());
     processReader(factory.apply(file), statistics, counters, blackhole);
   }
 
@@ -302,7 +304,8 @@ public class SparkBenchmark implements OrcBenchmark {
            JavaConverters.collectionAsScalaIterableConverter(filters).asScala().toSeq(),
            scalaMap, source.conf);
     PartitionedFile file = new PartitionedFile(InternalRow.empty(),
-        SparkPath.fromPath(source.path), 0, Long.MAX_VALUE, new String[0], 0L, 0L);
+        SparkPath.fromPath(source.path), 0, Long.MAX_VALUE, new String[0], 0L, 0L,
+        Map$.MODULE$.empty());
     processReader(factory.apply(file), statistics, counters, blackhole);
   }
 }
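
Context for the SparkBenchmark change above: Spark 3.5.0 extended the
PartitionedFile constructor with an eighth parameter,
otherConstantMetadataColumnValues (a Scala immutable Map), which is why each
call site now passes Map$.MODULE$.empty(). Below is a minimal Java sketch of
the updated constructor call, assuming Spark 3.5.0 on the classpath; the
class name and file path are illustrative, not from this commit:

    import org.apache.spark.paths.SparkPath;
    import org.apache.spark.sql.catalyst.InternalRow;
    import org.apache.spark.sql.execution.datasources.PartitionedFile;
    import scala.collection.immutable.Map$;

    public class PartitionedFileExample {
      public static void main(String[] args) {
        // Construct a PartitionedFile the way SparkBenchmark now does
        // against Spark 3.5.0.
        PartitionedFile file = new PartitionedFile(
            InternalRow.empty(),                        // no partition values
            SparkPath.fromPathString("/tmp/data.orc"),  // hypothetical path
            0,                                          // start offset
            Long.MAX_VALUE,                             // length: whole file
            new String[0],                              // no preferred hosts
            0L,                                         // modification time
            0L,                                         // file size (unknown)
            Map$.MODULE$.empty());                      // new in Spark 3.5.0
        System.out.println(file.filePath());
      }
    }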
