This is an automated email from the ASF dual-hosted git repository.

mbutrovich pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/datafusion-comet.git


The following commit(s) were added to refs/heads/main by this push:
     new f0f0653a2 refactor: Scala hygiene - remove `scala.collection.JavaConverters` (#2393)
f0f0653a2 is described below

commit f0f0653a286e40fd16e31f026e61f46eb906f424
Author: hsiang-c <[email protected]>
AuthorDate: Tue Sep 16 09:09:51 2025 -0700

    refactor: Scala hygiene - remove `scala.collection.JavaConverters` (#2393)
    
    * Use scala-collection-compat for Spark 3.4
    
    * Use scala.jdk.CollectionConverters for Scala files
    
    * Use scala.jdk.javaapi.CollectionConverters for Java files
    
    * Include scala-collection-compat
    
    * Allow scala-collection-compat in JAR
    
    * Exclude scala-collection-compat to avoid duplicated classes
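    
    A minimal sketch of the conversion idiom the Scala-side changes standardize on
    (illustrative only: the object name and sample values are assumptions, not code
    from this patch). Java files instead call the static methods of
    `scala.jdk.javaapi.CollectionConverters`, e.g. `asJava(...)` and `asScala(...)`,
    as shown in the diff below.
    
        import scala.jdk.CollectionConverters._
    
        object ConvertersSketch {
          def main(args: Array[String]): Unit = {
            // Scala -> Java: .asJava replaces JavaConverters.mapAsJavaMap(...)
            val scalaMap: Map[String, String] = Map("key" -> "value")
            val javaMap: java.util.Map[String, String] = scalaMap.asJava
    
            // Java -> Scala: .asScala replaces JavaConverters.mapAsScalaMap(...)
            val backToScala: scala.collection.mutable.Map[String, String] = javaMap.asScala
    
            println(javaMap)       // java.util.Map view of scalaMap
            println(backToScala)   // mutable Scala Map view of javaMap
          }
        }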
---
 common/pom.xml                                          |  4 ++++
 .../org/apache/comet/parquet/NativeBatchReader.java     | 17 +++++++++--------
 .../org/apache/comet/parquet/NativeColumnReader.java    |  6 +++---
 .../spark/sql/comet/execution/arrow/ArrowWriters.scala  |  2 +-
 .../sql/comet/parquet/CometParquetReadSupport.scala     |  2 +-
 .../scala/org/apache/spark/sql/comet/util/Utils.scala   |  2 +-
 .../java/org/apache/comet/parquet/TestColumnReader.java |  5 ++---
 dev/ensure-jars-have-correct-contents.sh                |  6 ++++++
 fuzz-testing/pom.xml                                    | 12 ++++++++++++
 pom.xml                                                 |  5 +++++
 spark/pom.xml                                           |  1 +
 .../execution/shuffle/CometUnsafeShuffleWriter.java     |  5 +++--
 .../apache/comet/parquet/CometParquetFileFormat.scala   |  6 +++---
 .../parquet/CometParquetPartitionReaderFactory.scala    |  4 ++--
 .../org/apache/comet/parquet/CometParquetScan.scala     |  2 +-
 .../scala/org/apache/comet/parquet/ParquetFilters.scala |  3 +--
 .../scala/org/apache/comet/rules/CometScanRule.scala    |  5 +++--
 .../scala/org/apache/comet/serde/CometProject.scala     |  2 +-
 .../main/scala/org/apache/comet/serde/CometSort.scala   |  2 +-
 .../scala/org/apache/comet/serde/QueryPlanSerde.scala   |  2 +-
 .../main/scala/org/apache/comet/serde/aggregates.scala  |  2 +-
 .../main/scala/org/apache/comet/serde/conditional.scala |  2 +-
 .../main/scala/org/apache/comet/serde/predicates.scala  |  2 +-
 .../src/main/scala/org/apache/comet/serde/structs.scala |  2 +-
 .../apache/spark/sql/comet/CometColumnarToRowExec.scala |  2 +-
 .../org/apache/spark/sql/comet/CometExecUtils.scala     |  2 +-
 .../org/apache/spark/sql/comet/CometMetricNode.scala    |  2 +-
 .../execution/shuffle/CometNativeShuffleWriter.scala    |  2 +-
 .../comet/execution/shuffle/CometShuffleManager.scala   |  2 +-
 .../test/scala/org/apache/comet/WithHdfsCluster.scala   |  2 +-
 .../org/apache/spark/sql/CometTPCDSQueryTestSuite.scala |  2 +-
 .../org/apache/spark/sql/CometTPCHQuerySuite.scala      |  2 +-
 .../apache/spark/sql/benchmark/CometReadBenchmark.scala |  2 +-
 33 files changed, 74 insertions(+), 45 deletions(-)

diff --git a/common/pom.xml b/common/pom.xml
index 069f02389..d14d0e2dd 100644
--- a/common/pom.xml
+++ b/common/pom.xml
@@ -63,6 +63,10 @@ under the License.
       <groupId>org.apache.arrow</groupId>
       <artifactId>arrow-c-data</artifactId>
     </dependency>
+    <dependency>
+      <groupId>org.scala-lang.modules</groupId>
+      <artifactId>scala-collection-compat_${scala.binary.version}</artifactId>
+    </dependency>
     <dependency>
       <groupId>junit</groupId>
       <artifactId>junit</artifactId>
diff --git a/common/src/main/java/org/apache/comet/parquet/NativeBatchReader.java b/common/src/main/java/org/apache/comet/parquet/NativeBatchReader.java
index 5c56e332b..67c277540 100644
--- a/common/src/main/java/org/apache/comet/parquet/NativeBatchReader.java
+++ b/common/src/main/java/org/apache/comet/parquet/NativeBatchReader.java
@@ -30,7 +30,6 @@ import java.util.*;
 import java.util.stream.Collectors;
 
 import scala.Option;
-import scala.collection.JavaConverters;
 import scala.collection.Seq;
 import scala.collection.mutable.Buffer;
 
@@ -81,6 +80,8 @@ import org.apache.comet.shims.ShimFileFormat;
 import org.apache.comet.vector.CometVector;
 import org.apache.comet.vector.NativeUtil;
 
+import static scala.jdk.javaapi.CollectionConverters.*;
+
 /**
  * A vectorized Parquet reader that reads a Parquet file in a batched fashion.
  *
@@ -255,7 +256,7 @@ public class NativeBatchReader extends RecordReader<Void, ColumnarBatch> impleme
     ParquetReadOptions readOptions = builder.build();
 
     Map<String, String> objectStoreOptions =
-        JavaConverters.mapAsJavaMap(NativeConfig.extractObjectStoreOptions(conf, file.pathUri()));
+        asJava(NativeConfig.extractObjectStoreOptions(conf, file.pathUri()));
 
     // TODO: enable off-heap buffer when they are ready
     ReadOptions cometReadOptions = ReadOptions.builder(conf).build();
@@ -306,7 +307,7 @@ public class NativeBatchReader extends RecordReader<Void, ColumnarBatch> impleme
       List<Type> fields = requestedSchema.getFields();
       List<Type> fileFields = fileSchema.getFields();
       ParquetColumn[] parquetFields =
-          JavaConverters.seqAsJavaList(parquetColumn.children()).toArray(new ParquetColumn[0]);
+          asJava(parquetColumn.children()).toArray(new ParquetColumn[0]);
       int numColumns = fields.size();
       if (partitionSchema != null) numColumns += partitionSchema.size();
       columnReaders = new AbstractColumnReader[numColumns];
@@ -618,14 +619,14 @@ public class NativeBatchReader extends RecordReader<Void, ColumnarBatch> impleme
   }
 
   private void checkParquetType(ParquetColumn column) throws IOException {
-    String[] path = JavaConverters.seqAsJavaList(column.path()).toArray(new String[0]);
+    String[] path = asJava(column.path()).toArray(new String[0]);
     if (containsPath(fileSchema, path)) {
       if (column.isPrimitive()) {
         ColumnDescriptor desc = column.descriptor().get();
         ColumnDescriptor fd = fileSchema.getColumnDescription(desc.getPath());
         TypeUtil.checkParquetType(fd, column.sparkType());
       } else {
-        for (ParquetColumn childColumn : JavaConverters.seqAsJavaList(column.children())) {
+        for (ParquetColumn childColumn : asJava(column.children())) {
           checkColumn(childColumn);
         }
       }
@@ -645,7 +646,7 @@ public class NativeBatchReader extends RecordReader<Void, ColumnarBatch> impleme
    * file schema, or whether it conforms to the type of the file schema.
    */
   private void checkColumn(ParquetColumn column) throws IOException {
-    String[] path = JavaConverters.seqAsJavaList(column.path()).toArray(new String[0]);
+    String[] path = asJava(column.path()).toArray(new String[0]);
     if (containsPath(fileSchema, path)) {
       if (column.isPrimitive()) {
         ColumnDescriptor desc = column.descriptor().get();
@@ -654,7 +655,7 @@ public class NativeBatchReader extends RecordReader<Void, ColumnarBatch> impleme
          throw new UnsupportedOperationException("Schema evolution not supported.");
         }
       } else {
-        for (ParquetColumn childColumn : JavaConverters.seqAsJavaList(column.children())) {
+        for (ParquetColumn childColumn : asJava(column.children())) {
           checkColumn(childColumn);
         }
       }
@@ -805,7 +806,7 @@ public class NativeBatchReader extends RecordReader<Void, ColumnarBatch> impleme
   @SuppressWarnings("deprecation")
   private int loadNextBatch() throws Throwable {
 
-    for (ParquetColumn childColumn : JavaConverters.seqAsJavaList(parquetColumn.children())) {
+    for (ParquetColumn childColumn : asJava(parquetColumn.children())) {
       checkParquetType(childColumn);
     }
 
diff --git a/common/src/main/java/org/apache/comet/parquet/NativeColumnReader.java b/common/src/main/java/org/apache/comet/parquet/NativeColumnReader.java
index 88447c147..b170ae583 100644
--- a/common/src/main/java/org/apache/comet/parquet/NativeColumnReader.java
+++ b/common/src/main/java/org/apache/comet/parquet/NativeColumnReader.java
@@ -33,6 +33,8 @@ import org.apache.spark.sql.types.DataType;
 import org.apache.comet.CometSchemaImporter;
 import org.apache.comet.vector.*;
 
+import static scala.jdk.javaapi.CollectionConverters.*;
+
 // TODO: extend ColumnReader instead of AbstractColumnReader to reduce code duplication
 public class NativeColumnReader extends AbstractColumnReader {
   protected static final Logger LOG = LoggerFactory.getLogger(NativeColumnReader.class);
@@ -145,9 +147,7 @@ public class NativeColumnReader extends AbstractColumnReader {
     ArrowSchema[] schemas = {schema};
 
     CometDecodedVector cometVector =
-        (CometDecodedVector)
-            scala.collection.JavaConverters.seqAsJavaList(nativeUtil.importVector(arrays, schemas))
-                .get(0);
+        (CometDecodedVector) asJava(nativeUtil.importVector(arrays, schemas)).get(0);
 
    // Update whether the current vector contains any null values. This is used in the following
     // batch(s) to determine whether we can skip loading the native vector.
diff --git a/common/src/main/scala/org/apache/spark/sql/comet/execution/arrow/ArrowWriters.scala b/common/src/main/scala/org/apache/spark/sql/comet/execution/arrow/ArrowWriters.scala
index f12ab7c4f..342441ce2 100644
--- a/common/src/main/scala/org/apache/spark/sql/comet/execution/arrow/ArrowWriters.scala
+++ b/common/src/main/scala/org/apache/spark/sql/comet/execution/arrow/ArrowWriters.scala
@@ -19,7 +19,7 @@
 
 package org.apache.spark.sql.comet.execution.arrow
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.arrow.vector._
 import org.apache.arrow.vector.complex._
diff --git a/common/src/main/scala/org/apache/spark/sql/comet/parquet/CometParquetReadSupport.scala b/common/src/main/scala/org/apache/spark/sql/comet/parquet/CometParquetReadSupport.scala
index 0c2a55b28..edaee563b 100644
--- a/common/src/main/scala/org/apache/spark/sql/comet/parquet/CometParquetReadSupport.scala
+++ b/common/src/main/scala/org/apache/spark/sql/comet/parquet/CometParquetReadSupport.scala
@@ -21,7 +21,7 @@ package org.apache.spark.sql.comet.parquet
 
 import java.util.{Locale, UUID}
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.parquet.schema._
 import org.apache.parquet.schema.LogicalTypeAnnotation.ListLogicalTypeAnnotation
diff --git a/common/src/main/scala/org/apache/spark/sql/comet/util/Utils.scala b/common/src/main/scala/org/apache/spark/sql/comet/util/Utils.scala
index a72208db2..daab5da36 100644
--- a/common/src/main/scala/org/apache/spark/sql/comet/util/Utils.scala
+++ b/common/src/main/scala/org/apache/spark/sql/comet/util/Utils.scala
@@ -23,7 +23,7 @@ import java.io.{DataInputStream, DataOutputStream, File}
 import java.nio.ByteBuffer
 import java.nio.channels.Channels
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.arrow.c.CDataDictionaryProvider
 import org.apache.arrow.vector.{BigIntVector, BitVector, DateDayVector, DecimalVector, FieldVector, FixedSizeBinaryVector, Float4Vector, Float8Vector, IntVector, SmallIntVector, TimeStampMicroTZVector, TimeStampMicroVector, TinyIntVector, ValueVector, VarBinaryVector, VarCharVector, VectorSchemaRoot}
diff --git a/common/src/test/java/org/apache/comet/parquet/TestColumnReader.java b/common/src/test/java/org/apache/comet/parquet/TestColumnReader.java
index d4e748a9b..32accae8e 100644
--- a/common/src/test/java/org/apache/comet/parquet/TestColumnReader.java
+++ b/common/src/test/java/org/apache/comet/parquet/TestColumnReader.java
@@ -26,8 +26,6 @@ import java.util.List;
 import java.util.concurrent.ThreadLocalRandom;
 import java.util.function.BiFunction;
 
-import scala.collection.JavaConverters;
-
 import org.junit.Test;
 
 import org.apache.arrow.memory.BufferAllocator;
@@ -46,6 +44,7 @@ import org.apache.comet.vector.CometVector;
 
 import static org.apache.spark.sql.types.DataTypes.*;
 import static org.junit.Assert.*;
+import static scala.jdk.javaapi.CollectionConverters.*;
 
 @SuppressWarnings("unchecked")
 public class TestColumnReader {
@@ -97,7 +96,7 @@ public class TestColumnReader {
       StructField field = StructField.apply("f", type, false, null);
 
       List<Object> values = Collections.singletonList(VALUES.get(i));
-      InternalRow row = GenericInternalRow.apply(JavaConverters.asScalaBuffer(values).toSeq());
+      InternalRow row = GenericInternalRow.apply(asScala(values).toSeq());
      ConstantColumnReader reader = new ConstantColumnReader(field, BATCH_SIZE, row, 0, true);
       reader.readBatch(BATCH_SIZE);
       CometVector vector = reader.currentBatch();
diff --git a/dev/ensure-jars-have-correct-contents.sh b/dev/ensure-jars-have-correct-contents.sh
index 23d0be323..f698fe78f 100755
--- a/dev/ensure-jars-have-correct-contents.sh
+++ b/dev/ensure-jars-have-correct-contents.sh
@@ -94,6 +94,12 @@ allowed_expr+="|^org/apache/spark/CometPlugin.class$"
 allowed_expr+="|^org/apache/spark/CometDriverPlugin.*$"
 allowed_expr+="|^org/apache/spark/CometTaskMemoryManager.class$"
 allowed_expr+="|^org/apache/spark/CometTaskMemoryManager.*$"
+allowed_expr+="|^scala-collection-compat.properties$"
+allowed_expr+="|^scala/$"
+allowed_expr+="|^scala/annotation/"
+allowed_expr+="|^scala/collection/"
+allowed_expr+="|^scala/jdk/"
+allowed_expr+="|^scala/util/"
 
 allowed_expr+=")"
 declare -i bad_artifacts=0
diff --git a/fuzz-testing/pom.xml b/fuzz-testing/pom.xml
index 5e5221f1f..728b32b82 100644
--- a/fuzz-testing/pom.xml
+++ b/fuzz-testing/pom.xml
@@ -50,11 +50,23 @@ under the License.
             <groupId>org.apache.spark</groupId>
             <artifactId>spark-sql_${scala.binary.version}</artifactId>
             <scope>provided</scope>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.scala-lang.modules</groupId>
+                    <artifactId>scala-collection-compat_${scala.binary.version}</artifactId>
+                </exclusion>
+            </exclusions>
         </dependency>
         <dependency>
            <groupId>org.apache.datafusion</groupId>
            <artifactId>comet-spark-spark${spark.version.short}_${scala.binary.version}</artifactId>
             <version>${project.version}</version>
+            <exclusions>
+                <exclusion>
+                    <groupId>org.scala-lang.modules</groupId>
+                    <artifactId>scala-collection-compat_${scala.binary.version}</artifactId>
+                </exclusion>
+            </exclusions>
         </dependency>
         <dependency>
             <groupId>org.rogach</groupId>
diff --git a/pom.xml b/pom.xml
index b659ae4e1..cdf1b4e1d 100644
--- a/pom.xml
+++ b/pom.xml
@@ -258,6 +258,11 @@ under the License.
         <artifactId>scala-library</artifactId>
         <version>${scala.version}</version>
       </dependency>
+      <dependency>
+        <groupId>org.scala-lang.modules</groupId>
+        <artifactId>scala-collection-compat_${scala.binary.version}</artifactId>
+        <version>2.12.0</version>
+      </dependency>
       <dependency>
         <groupId>com.google.protobuf</groupId>
         <artifactId>protobuf-java</artifactId>
diff --git a/spark/pom.xml b/spark/pom.xml
index b71cfabff..77e2d09c6 100644
--- a/spark/pom.xml
+++ b/spark/pom.xml
@@ -220,6 +220,7 @@ under the License.
                  <!-- Relocate Protobuf since Spark uses 2.5.0 while Comet uses 3.x -->
                   <include>com.google.protobuf:protobuf-java</include>
                   <include>com.google.guava:guava</include>
+                  <include>org.scala-lang.modules:scala-collection-compat_${scala.binary.version}</include>
                 </includes>
               </artifactSet>
               <filters>
diff --git a/spark/src/main/java/org/apache/spark/sql/comet/execution/shuffle/CometUnsafeShuffleWriter.java b/spark/src/main/java/org/apache/spark/sql/comet/execution/shuffle/CometUnsafeShuffleWriter.java
index 552a189ab..a845e743d 100644
--- a/spark/src/main/java/org/apache/spark/sql/comet/execution/shuffle/CometUnsafeShuffleWriter.java
+++ b/spark/src/main/java/org/apache/spark/sql/comet/execution/shuffle/CometUnsafeShuffleWriter.java
@@ -33,7 +33,6 @@ import javax.annotation.Nullable;
 
 import scala.Option;
 import scala.Product2;
-import scala.collection.JavaConverters;
 import scala.reflect.ClassTag;
 import scala.reflect.ClassTag$;
 
@@ -81,6 +80,8 @@ import com.google.common.io.Closeables;
 import org.apache.comet.CometConf;
 import org.apache.comet.Native;
 
+import static scala.jdk.javaapi.CollectionConverters.*;
+
 /**
  * This is based on Spark {@link UnsafeShuffleWriter}, as a writer to write shuffling rows into
  * Arrow format after sorting rows based on the partition ID.
@@ -201,7 +202,7 @@ public class CometUnsafeShuffleWriter<K, V> extends ShuffleWriter<K, V> {
   /** This convenience method should only be called in test code. */
   @VisibleForTesting
   public void write(Iterator<Product2<K, V>> records) throws IOException {
-    write(JavaConverters.asScalaIteratorConverter(records).asScala());
+    write(asScala(records));
   }
 
   @Override
diff --git a/spark/src/main/scala/org/apache/comet/parquet/CometParquetFileFormat.scala b/spark/src/main/scala/org/apache/comet/parquet/CometParquetFileFormat.scala
index 5c2de2a6b..3f1d8743f 100644
--- a/spark/src/main/scala/org/apache/comet/parquet/CometParquetFileFormat.scala
+++ b/spark/src/main/scala/org/apache/comet/parquet/CometParquetFileFormat.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.parquet
 
-import scala.collection.JavaConverters
+import scala.jdk.CollectionConverters._
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.parquet.filter2.predicate.FilterApi
@@ -164,7 +164,7 @@ class CometParquetFileFormat(scanImpl: String)
             datetimeRebaseSpec.mode == CORRECTED,
             partitionSchema,
             file.partitionValues,
-            JavaConverters.mapAsJavaMap(metrics))
+            metrics.asJava)
           try {
             batchReader.init()
           } catch {
@@ -198,7 +198,7 @@ class CometParquetFileFormat(scanImpl: String)
             datetimeRebaseSpec.mode == CORRECTED,
             partitionSchema,
             file.partitionValues,
-            JavaConverters.mapAsJavaMap(metrics))
+            metrics.asJava)
           try {
             batchReader.init()
           } catch {
diff --git a/spark/src/main/scala/org/apache/comet/parquet/CometParquetPartitionReaderFactory.scala b/spark/src/main/scala/org/apache/comet/parquet/CometParquetPartitionReaderFactory.scala
index 69cffdd15..495054fc8 100644
--- a/spark/src/main/scala/org/apache/comet/parquet/CometParquetPartitionReaderFactory.scala
+++ b/spark/src/main/scala/org/apache/comet/parquet/CometParquetPartitionReaderFactory.scala
@@ -19,8 +19,8 @@
 
 package org.apache.comet.parquet
 
-import scala.collection.JavaConverters
 import scala.collection.mutable
+import scala.jdk.CollectionConverters._
 
 import org.apache.parquet.filter2.predicate.{FilterApi, FilterPredicate}
 import org.apache.parquet.hadoop.ParquetInputFormat
@@ -139,7 +139,7 @@ case class CometParquetPartitionReaderFactory(
         datetimeRebaseSpec.mode == CORRECTED,
         partitionSchema,
         file.partitionValues,
-        JavaConverters.mapAsJavaMap(metrics))
+        metrics.asJava)
       val taskContext = Option(TaskContext.get)
      taskContext.foreach(_.addTaskCompletionListener[Unit](_ => cometReader.close()))
       return cometReader
diff --git a/spark/src/main/scala/org/apache/comet/parquet/CometParquetScan.scala b/spark/src/main/scala/org/apache/comet/parquet/CometParquetScan.scala
index b04d6ebc1..3cb01c08e 100644
--- a/spark/src/main/scala/org/apache/comet/parquet/CometParquetScan.scala
+++ b/spark/src/main/scala/org/apache/comet/parquet/CometParquetScan.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.parquet
 
-import scala.collection.JavaConverters.mapAsScalaMapConverter
+import scala.jdk.CollectionConverters._
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.spark.sql.SparkSession
diff --git a/spark/src/main/scala/org/apache/comet/parquet/ParquetFilters.scala b/spark/src/main/scala/org/apache/comet/parquet/ParquetFilters.scala
index 65157014d..dbc3e17f8 100644
--- a/spark/src/main/scala/org/apache/comet/parquet/ParquetFilters.scala
+++ b/spark/src/main/scala/org/apache/comet/parquet/ParquetFilters.scala
@@ -26,8 +26,7 @@ import java.sql.{Date, Timestamp}
 import java.time.{Duration, Instant, LocalDate, Period}
 import java.util.Locale
 
-import scala.collection.JavaConverters._
-import scala.collection.JavaConverters.asScalaBufferConverter
+import scala.jdk.CollectionConverters._
 
 import org.apache.parquet.column.statistics.{Statistics => ParquetStatistics}
 import org.apache.parquet.filter2.predicate._
diff --git a/spark/src/main/scala/org/apache/comet/rules/CometScanRule.scala b/spark/src/main/scala/org/apache/comet/rules/CometScanRule.scala
index cee2764b0..cbca7304d 100644
--- a/spark/src/main/scala/org/apache/comet/rules/CometScanRule.scala
+++ b/spark/src/main/scala/org/apache/comet/rules/CometScanRule.scala
@@ -21,8 +21,9 @@ package org.apache.comet.rules
 
 import java.net.URI
 
-import scala.collection.{mutable, JavaConverters}
+import scala.collection.mutable
 import scala.collection.mutable.ListBuffer
+import scala.jdk.CollectionConverters._
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.spark.internal.Logging
@@ -443,7 +444,7 @@ object CometScanRule extends Logging {
       // previously validated
       case _ =>
         try {
-          val objectStoreOptions = JavaConverters.mapAsJavaMap(objectStoreConfigMap)
+          val objectStoreOptions = objectStoreConfigMap.asJava
           Native.validateObjectStoreConfig(filePath, objectStoreOptions)
         } catch {
           case e: CometNativeException =>
diff --git a/spark/src/main/scala/org/apache/comet/serde/CometProject.scala b/spark/src/main/scala/org/apache/comet/serde/CometProject.scala
index ad48ef27f..651aa8fef 100644
--- a/spark/src/main/scala/org/apache/comet/serde/CometProject.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/CometProject.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.execution.ProjectExec
 
diff --git a/spark/src/main/scala/org/apache/comet/serde/CometSort.scala b/spark/src/main/scala/org/apache/comet/serde/CometSort.scala
index 5229c7601..2dec25c0d 100644
--- a/spark/src/main/scala/org/apache/comet/serde/CometSort.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/CometSort.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.execution.SortExec
 
diff --git a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
index 1742912d8..0f3b1bf1f 100644
--- a/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/QueryPlanSerde.scala
@@ -19,8 +19,8 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
 import scala.collection.mutable.ListBuffer
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.internal.Logging
 import org.apache.spark.sql.catalyst.expressions._
diff --git a/spark/src/main/scala/org/apache/comet/serde/aggregates.scala b/spark/src/main/scala/org/apache/comet/serde/aggregates.scala
index 51c895128..d6e129a3c 100644
--- a/spark/src/main/scala/org/apache/comet/serde/aggregates.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/aggregates.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters.asJavaIterableConverter
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.catalyst.expressions.{Attribute, EvalMode}
 import org.apache.spark.sql.catalyst.expressions.aggregate.{AggregateExpression, Average, BitAndAgg, BitOrAgg, BitXorAgg, BloomFilterAggregate, CentralMomentAgg, Corr, Count, Covariance, CovPopulation, CovSample, First, Last, Max, Min, StddevPop, StddevSamp, Sum, VariancePop, VarianceSamp}
diff --git a/spark/src/main/scala/org/apache/comet/serde/conditional.scala b/spark/src/main/scala/org/apache/comet/serde/conditional.scala
index e4f76c101..617043524 100644
--- a/spark/src/main/scala/org/apache/comet/serde/conditional.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/conditional.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.catalyst.expressions.{Attribute, CaseWhen, Coalesce, Expression, If, IsNotNull}
 
diff --git a/spark/src/main/scala/org/apache/comet/serde/predicates.scala b/spark/src/main/scala/org/apache/comet/serde/predicates.scala
index f4e746c27..00a1fb6fa 100644
--- a/spark/src/main/scala/org/apache/comet/serde/predicates.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/predicates.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.catalyst.expressions.{And, Attribute, EqualNullSafe, EqualTo, Expression, GreaterThan, GreaterThanOrEqual, In, InSet, IsNaN, IsNotNull, IsNull, LessThan, LessThanOrEqual, Literal, Not, Or}
 import org.apache.spark.sql.types.BooleanType
diff --git a/spark/src/main/scala/org/apache/comet/serde/structs.scala b/spark/src/main/scala/org/apache/comet/serde/structs.scala
index 1c25d87bb..208b2e126 100644
--- a/spark/src/main/scala/org/apache/comet/serde/structs.scala
+++ b/spark/src/main/scala/org/apache/comet/serde/structs.scala
@@ -19,7 +19,7 @@
 
 package org.apache.comet.serde
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.sql.catalyst.expressions.{Attribute, CreateNamedStruct, GetArrayStructFields, GetStructField, StructsToJson}
 import org.apache.spark.sql.types.{ArrayType, DataType, DataTypes, MapType, StructType}
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/CometColumnarToRowExec.scala b/spark/src/main/scala/org/apache/spark/sql/comet/CometColumnarToRowExec.scala
index 95e03ca69..6d0a31236 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/CometColumnarToRowExec.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/CometColumnarToRowExec.scala
@@ -22,8 +22,8 @@ package org.apache.spark.sql.comet
 import java.util.UUID
 import java.util.concurrent.{Future, TimeoutException, TimeUnit}
 
-import scala.collection.JavaConverters._
 import scala.concurrent.Promise
+import scala.jdk.CollectionConverters._
 import scala.util.control.NonFatal
 
 import org.apache.spark.{broadcast, SparkException}
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/CometExecUtils.scala b/spark/src/main/scala/org/apache/spark/sql/comet/CometExecUtils.scala
index 63cf323a8..fd97fe3fa 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/CometExecUtils.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/CometExecUtils.scala
@@ -19,7 +19,7 @@
 
 package org.apache.spark.sql.comet
 
-import scala.collection.JavaConverters.asJavaIterableConverter
+import scala.jdk.CollectionConverters._
 import scala.reflect.ClassTag
 
 import org.apache.spark.{Partition, SparkContext, TaskContext}
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/CometMetricNode.scala b/spark/src/main/scala/org/apache/spark/sql/comet/CometMetricNode.scala
index 6f8a53362..70027c2c4 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/CometMetricNode.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/CometMetricNode.scala
@@ -19,7 +19,7 @@
 
 package org.apache.spark.sql.comet
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.SparkContext
 import org.apache.spark.internal.Logging
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometNativeShuffleWriter.scala b/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometNativeShuffleWriter.scala
index 5d772be40..018c9f7c1 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometNativeShuffleWriter.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometNativeShuffleWriter.scala
@@ -22,7 +22,7 @@ package org.apache.spark.sql.comet.execution.shuffle
 import java.nio.{ByteBuffer, ByteOrder}
 import java.nio.file.{Files, Paths}
 
-import scala.collection.JavaConverters.asJavaIterableConverter
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.{SparkEnv, TaskContext}
 import org.apache.spark.internal.Logging
diff --git a/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometShuffleManager.scala b/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometShuffleManager.scala
index 1142c6af1..d733c00fe 100644
--- a/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometShuffleManager.scala
+++ b/spark/src/main/scala/org/apache/spark/sql/comet/execution/shuffle/CometShuffleManager.scala
@@ -22,7 +22,7 @@ package org.apache.spark.sql.comet.execution.shuffle
 import java.util.Collections
 import java.util.concurrent.ConcurrentHashMap
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.ShuffleDependency
 import org.apache.spark.SparkConf
diff --git a/spark/src/test/scala/org/apache/comet/WithHdfsCluster.scala b/spark/src/test/scala/org/apache/comet/WithHdfsCluster.scala
index 49124d63e..9f20644e7 100644
--- a/spark/src/test/scala/org/apache/comet/WithHdfsCluster.scala
+++ b/spark/src/test/scala/org/apache/comet/WithHdfsCluster.scala
@@ -24,7 +24,7 @@ import java.net.InetAddress
 import java.nio.file.Files
 import java.util.UUID
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.commons.io.FileUtils
 import org.apache.hadoop.conf.Configuration
diff --git a/spark/src/test/scala/org/apache/spark/sql/CometTPCDSQueryTestSuite.scala b/spark/src/test/scala/org/apache/spark/sql/CometTPCDSQueryTestSuite.scala
index 0c75fabe3..56f5bcfde 100644
--- a/spark/src/test/scala/org/apache/spark/sql/CometTPCDSQueryTestSuite.scala
+++ b/spark/src/test/scala/org/apache/spark/sql/CometTPCDSQueryTestSuite.scala
@@ -22,7 +22,7 @@ package org.apache.spark.sql
 import java.io.File
 import java.nio.file.{Files, Paths}
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.sql.catalyst.util.{fileToString, resourceToString, stringToFile}
diff --git a/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala b/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala
index aee85bf8e..e158af633 100644
--- a/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala
+++ b/spark/src/test/scala/org/apache/spark/sql/CometTPCHQuerySuite.scala
@@ -22,7 +22,7 @@ package org.apache.spark.sql
 import java.io.File
 import java.nio.file.{Files, Paths}
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 
 import org.apache.spark.{SparkConf, SparkContext}
 import org.apache.spark.internal.config.{MEMORY_OFFHEAP_ENABLED, MEMORY_OFFHEAP_SIZE}
diff --git a/spark/src/test/scala/org/apache/spark/sql/benchmark/CometReadBenchmark.scala b/spark/src/test/scala/org/apache/spark/sql/benchmark/CometReadBenchmark.scala
index fcfbf6074..02b9ca5dc 100644
--- a/spark/src/test/scala/org/apache/spark/sql/benchmark/CometReadBenchmark.scala
+++ b/spark/src/test/scala/org/apache/spark/sql/benchmark/CometReadBenchmark.scala
@@ -21,7 +21,7 @@ package org.apache.spark.sql.benchmark
 
 import java.io.File
 
-import scala.collection.JavaConverters._
+import scala.jdk.CollectionConverters._
 import scala.util.Random
 
 import org.apache.hadoop.fs.Path


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
