This is an automated email from the ASF dual-hosted git repository.

philo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git


The following commit(s) were added to refs/heads/main by this push:
     new ab7aeae8ab [GLUTEN-8852][CORE] Improve the use of Spark's classic SparkSession, Column and Dataset with implicit conversion (#10462)
ab7aeae8ab is described below

commit ab7aeae8ab02cfa748884fafa94a7cb8ca5f88d6
Author: PHILO-HE <[email protected]>
AuthorDate: Sun Aug 17 21:01:27 2025 +0800

    [GLUTEN-8852][CORE] Improve the use of Spark's classic SparkSession, Column and Dataset with implicit conversion (#10462)
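
    Call sites that previously went through the SparkShims layer now use
    per-version helpers under org.apache.spark.sql.classic. Below is a
    minimal sketch of the new usage, distilled from the diff that follows;
    it assumes the caller lives under the org.apache.spark.sql package (as
    the touched test suites do) with `spark` and `df` in scope, and the
    value names are illustrative, not from the patch:

        import org.apache.spark.sql.SparkSession
        import org.apache.spark.sql.catalyst.expressions.{Alias, Literal}
        import org.apache.spark.sql.classic.{ClassicColumn, ClassicDataset}
        import org.apache.spark.sql.classic.ClassicConversions._
        import org.apache.spark.sql.classic.ExtendedClassicConversions._
        import org.apache.spark.sql.types.StringType

        // Build a Column from a Catalyst Expression on any supported version.
        val nullCol = ClassicColumn(Alias(Literal(null, StringType), "null_col")())

        // Materialize a DataFrame from a LogicalPlan without the shim layer.
        val df2 = ClassicDataset.ofRows(spark, df.logicalPlan)

        // Test-only session cleanup; on Spark 4.0 the implicit conversion
        // redirects this to classic.SparkSession.
        SparkSession.cleanupAnyExistingSession()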
---
 .github/workflows/velox_backend_x86.yml            |  7 ++---
 .../execution/VeloxStringFunctionsSuite.scala      |  7 ++---
 .../execution/benchmark/VeloxRasBenchmark.scala    | 12 ++++++---
 .../execution/joins/GlutenExistenceJoinSuite.scala | 16 +++++++----
 .../spark/sql/execution/GlutenImplicits.scala      |  7 +++--
 .../org/apache/spark/sql/GlutenQueryTest.scala     |  8 +++---
 .../org/apache/gluten/sql/shims/SparkShims.scala   | 11 +-------
 .../gluten/sql/shims/spark32/Spark32Shims.scala    | 14 +---------
 .../apache/spark/sql/classic/ClassicColumn.scala}  | 16 +++--------
 .../ClassicDataset.scala}                          | 16 +++++------
 .../org/apache/spark/sql/classic/conversions.scala | 17 ++++++++----
 .../gluten/sql/shims/spark33/Spark33Shims.scala    | 14 +---------
 .../apache/spark/sql/classic/ClassicColumn.scala}  | 16 +++--------
 .../apache/spark/sql/classic/ClassicDataset.scala} | 16 +++++------
 .../org/apache/spark/sql/classic/conversions.scala | 17 ++++++++----
 .../gluten/sql/shims/spark34/Spark34Shims.scala    | 14 +---------
 .../{conversions.scala => ClassicColumn.scala}     | 16 +++--------
 .../apache/spark/sql/classic/ClassicDataset.scala} | 16 +++++------
 .../org/apache/spark/sql/classic/conversions.scala | 17 ++++++++----
 .../gluten/sql/shims/spark35/Spark35Shims.scala    | 14 +---------
 .../apache/spark/sql/classic/ClassicColumn.scala}  | 16 +++--------
 .../ClassicDataset.scala}                          | 16 +++++------
 .../org/apache/spark/sql/classic/conversions.scala | 17 ++++++++----
 .../gluten/sql/shims/spark40/Spark40Shims.scala    | 13 +--------
 .../scala/org/apache/spark/sql/SparkSqlUtils.scala | 31 ----------------------
 .../apache/spark/sql/classic/ClassicColumn.scala}  | 19 +++++++------
 .../apache/spark/sql/classic/ClassicDataset.scala} | 17 +++++-------
 .../org/apache/spark/sql/classic/conversions.scala | 20 +++++++-------
 28 files changed, 152 insertions(+), 268 deletions(-)

diff --git a/.github/workflows/velox_backend_x86.yml b/.github/workflows/velox_backend_x86.yml
index f15114d933..7bb04c02a7 100644
--- a/.github/workflows/velox_backend_x86.yml
+++ b/.github/workflows/velox_backend_x86.yml
@@ -238,11 +238,8 @@ jobs:
           echo "JAVA_HOME: $JAVA_HOME"
           cd $GITHUB_WORKSPACE/
           if [ "${{ matrix.spark }}" = "spark-4.0" ]; then
-            rm -f backends-velox/src/test/scala/org/apache/gluten/execution/VeloxStringFunctionsSuite.scala \
-            backends-velox/src/test/scala/org/apache/spark/sql/execution/GlutenHiveUDFSuite.scala \
-            backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala \
-            backends-velox/src/test/scala/org/apache/spark/sql/execution/benchmark/VeloxRasBenchmark.scala \
-            backends-velox/src/test/scala/org/apache/spark/sql/execution/joins/GlutenExistenceJoinSuite.scala
+            rm -f backends-velox/src/test/scala/org/apache/spark/sql/execution/GlutenHiveUDFSuite.scala \
+            backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala
             # Only verify compilation for now.
            $MVN_CMD clean install -P${{ matrix.spark }} -P${{ matrix.java }} -Pscala-2.13 -Pbackends-velox -DskipTests
             exit 0
diff --git a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxStringFunctionsSuite.scala b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxStringFunctionsSuite.scala
index c1dd956176..6901aabd25 100644
--- a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxStringFunctionsSuite.scala
+++ b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxStringFunctionsSuite.scala
@@ -17,8 +17,10 @@
 package org.apache.gluten.execution
 
 import org.apache.spark.SparkConf
+import org.apache.spark.sql.catalyst.expressions.{Alias, Literal}
 import org.apache.spark.sql.catalyst.optimizer.{ConstantFolding, NullPropagation}
-import org.apache.spark.sql.functions.{col, lit}
+import org.apache.spark.sql.classic.ClassicColumn
+import org.apache.spark.sql.functions.col
 import org.apache.spark.sql.types.StringType
 
 class VeloxStringFunctionsSuite extends VeloxWholeStageTransformerSuite {
@@ -33,11 +35,10 @@ class VeloxStringFunctionsSuite extends VeloxWholeStageTransformerSuite {
 
   override def beforeAll(): Unit = {
     super.beforeAll()
-    val nullColumn = lit(null).cast(StringType).alias(NULL_STR_COL)
     createTPCHNotNullTables()
     spark
       .table("lineitem")
-      .select(col("*"), nullColumn)
+      .select(col("*"), ClassicColumn(Alias(Literal(null, StringType), NULL_STR_COL)()))
       .createOrReplaceTempView(LINEITEM_TABLE)
   }
 
diff --git a/backends-velox/src/test/scala/org/apache/spark/sql/execution/benchmark/VeloxRasBenchmark.scala b/backends-velox/src/test/scala/org/apache/spark/sql/execution/benchmark/VeloxRasBenchmark.scala
index 0c48d75cf4..456d86c903 100644
--- a/backends-velox/src/test/scala/org/apache/spark/sql/execution/benchmark/VeloxRasBenchmark.scala
+++ b/backends-velox/src/test/scala/org/apache/spark/sql/execution/benchmark/VeloxRasBenchmark.scala
@@ -18,11 +18,11 @@ package org.apache.spark.sql.execution.benchmark
 
 import org.apache.gluten.config.GlutenConfig
 import org.apache.gluten.execution.Table
-import org.apache.gluten.sql.shims.SparkShimLoader
 import org.apache.gluten.utils.Arm
 
 import org.apache.spark.benchmark.Benchmark
 import org.apache.spark.sql.SparkSession
+import org.apache.spark.sql.classic.ExtendedClassicConversions._
 import org.apache.spark.sql.internal.SQLConf
 
 import java.io.File
@@ -35,6 +35,12 @@ import scala.io.Source
  * is not considered.
  */
 object VeloxRasBenchmark extends SqlBasedBenchmark {
+
+  // TODO: remove this if we can suppress unused import error.
+  locally {
+    new RichSqlSparkSession(SparkSession)
+  }
+
   private val tpchQueries: String =
     getClass
       .getResource("/")
@@ -72,14 +78,14 @@ object VeloxRasBenchmark extends SqlBasedBenchmark {
   }
 
   private def createLegacySession(): SparkSession = {
-    SparkShimLoader.getSparkShims.cleanupAnyExistingSession()
+    SparkSession.cleanupAnyExistingSession()
     sessionBuilder()
       .config(GlutenConfig.RAS_ENABLED.key, false)
       .getOrCreate()
   }
 
   private def createRasSession(): SparkSession = {
-    SparkShimLoader.getSparkShims.cleanupAnyExistingSession()
+    SparkSession.cleanupAnyExistingSession()
     sessionBuilder()
       .config(GlutenConfig.RAS_ENABLED.key, true)
       .getOrCreate()
diff --git a/backends-velox/src/test/scala/org/apache/spark/sql/execution/joins/GlutenExistenceJoinSuite.scala b/backends-velox/src/test/scala/org/apache/spark/sql/execution/joins/GlutenExistenceJoinSuite.scala
index 9540c6af0c..4c6508f69a 100644
--- a/backends-velox/src/test/scala/org/apache/spark/sql/execution/joins/GlutenExistenceJoinSuite.scala
+++ b/backends-velox/src/test/scala/org/apache/spark/sql/execution/joins/GlutenExistenceJoinSuite.scala
@@ -17,17 +17,23 @@
 package org.apache.spark.sql.execution.joins
 
 import org.apache.gluten.execution.{VeloxBroadcastNestedLoopJoinExecTransformer, VeloxWholeStageTransformerSuite}
-import org.apache.gluten.sql.shims.SparkShimLoader
 
-import org.apache.spark.sql.{DataFrame, Row}
+import org.apache.spark.sql.{Column, DataFrame, Row}
 import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.plans.ExistenceJoin
 import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.classic.ClassicConversions._
+import org.apache.spark.sql.classic.ClassicDataset
 import org.apache.spark.sql.test.SQLTestUtils
 import org.apache.spark.sql.types._
 
 class GlutenExistenceJoinSuite extends VeloxWholeStageTransformerSuite with SQLTestUtils {
 
+  // TODO: remove this if we can suppress unused import error.
+  locally {
+    new ColumnConstructorExt(Column)
+  }
+
   override protected val resourcePath: String = "N/A"
   override protected val fileFormat: String = "N/A"
 
@@ -55,8 +61,8 @@ class GlutenExistenceJoinSuite extends VeloxWholeStageTransformerSuite with SQLT
       new StructType().add("id", IntegerType).add("val", StringType)
     )
 
-    val leftPlan = SparkShimLoader.getSparkShims.getLogicalPlanFromDataFrame(left)
-    val rightPlan = SparkShimLoader.getSparkShims.getLogicalPlanFromDataFrame(right)
+    val leftPlan = left.logicalPlan
+    val rightPlan = right.logicalPlan
 
     val existsAttr = AttributeReference("exists", BooleanType, nullable = false)()
 
@@ -75,7 +81,7 @@ class GlutenExistenceJoinSuite extends VeloxWholeStageTransformerSuite with SQLT
       child = existenceJoin
     )
 
-    val df = SparkShimLoader.getSparkShims.dataSetOfRows(spark, project)
+    val df = ClassicDataset.ofRows(spark, project)
 
     assert(existenceJoin.joinType == ExistenceJoin(existsAttr))
     assert(existenceJoin.condition.contains(joinCondition))
diff --git a/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/GlutenImplicits.scala b/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/GlutenImplicits.scala
index 77064deb49..474a271769 100644
--- a/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/GlutenImplicits.scala
+++ b/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/GlutenImplicits.scala
@@ -21,7 +21,6 @@ import org.apache.gluten.execution.{GlutenPlan, WholeStageTransformer}
 import org.apache.gluten.utils.PlanUtil
 
 import org.apache.spark.sql.{Column, Dataset, SparkSession}
-import org.apache.spark.sql.catalyst.expressions.AttributeReference
 import org.apache.spark.sql.catalyst.plans.QueryPlan
 import org.apache.spark.sql.catalyst.plans.logical.{CommandResult, LogicalPlan}
 import org.apache.spark.sql.catalyst.util.StringUtils.PlanStringConcat
@@ -34,7 +33,6 @@ import org.apache.spark.sql.execution.datasources.WriteFilesExec
 import org.apache.spark.sql.execution.datasources.v2.V2CommandExec
 import org.apache.spark.sql.execution.exchange.ReusedExchangeExec
 import org.apache.spark.sql.internal.SQLConf
-import org.apache.spark.sql.types.BooleanType
 
 import scala.collection.mutable
 import scala.collection.mutable.ArrayBuffer
@@ -58,8 +56,9 @@ import scala.collection.mutable.ArrayBuffer
 // format: on
 object GlutenImplicits {
 
-  def noOp(): Unit = {
-    val _ = new ColumnConstructorExt(Column).apply(AttributeReference("fake", BooleanType)())
+  // TODO: remove this if we can suppress unused import error.
+  locally {
+    new ColumnConstructorExt(Column)
   }
 
   case class FallbackSummary(
diff --git a/gluten-substrait/src/test/scala/org/apache/spark/sql/GlutenQueryTest.scala b/gluten-substrait/src/test/scala/org/apache/spark/sql/GlutenQueryTest.scala
index 71ee19bd04..4d292ea13e 100644
--- a/gluten-substrait/src/test/scala/org/apache/spark/sql/GlutenQueryTest.scala
+++ b/gluten-substrait/src/test/scala/org/apache/spark/sql/GlutenQueryTest.scala
@@ -26,7 +26,7 @@ import org.apache.gluten.execution.TransformSupport
 import org.apache.gluten.sql.shims.SparkShimLoader
 
 import org.apache.spark.{SPARK_VERSION_SHORT, SparkConf}
-import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeReference}
+import org.apache.spark.sql.catalyst.expressions.Attribute
 import org.apache.spark.sql.catalyst.plans._
 import org.apache.spark.sql.catalyst.plans.logical._
 import org.apache.spark.sql.catalyst.util._
@@ -34,7 +34,6 @@ import org.apache.spark.sql.classic.ClassicConversions._
 import org.apache.spark.sql.execution.{CommandResultExec, SparkPlan, SQLExecution, UnaryExecNode}
 import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanExec, ShuffleQueryStageExec}
 import org.apache.spark.sql.execution.columnar.InMemoryRelation
-import org.apache.spark.sql.types.BooleanType
 import org.apache.spark.storage.StorageLevel
 
 import org.junit.Assert
@@ -48,8 +47,9 @@ import scala.reflect.runtime.universe
 
 abstract class GlutenQueryTest extends PlanTest {
 
-  def noOp(): Unit = {
-    val _ = new ColumnConstructorExt(Column).apply(AttributeReference("fake", BooleanType)())
+  // TODO: remove this if we can suppress unused import error.
+  locally {
+    new ColumnConstructorExt(Column)
   }
 
   protected def spark: SparkSession
diff --git a/shims/common/src/main/scala/org/apache/gluten/sql/shims/SparkShims.scala b/shims/common/src/main/scala/org/apache/gluten/sql/shims/SparkShims.scala
index 6fd93b807d..570e3cfef5 100644
--- a/shims/common/src/main/scala/org/apache/gluten/sql/shims/SparkShims.scala
+++ b/shims/common/src/main/scala/org/apache/gluten/sql/shims/SparkShims.scala
@@ -24,7 +24,7 @@ import org.apache.spark.broadcast.Broadcast
 import org.apache.spark.internal.io.FileCommitProtocol
 import org.apache.spark.scheduler.TaskInfo
 import org.apache.spark.shuffle.ShuffleHandle
-import org.apache.spark.sql.{AnalysisException, DataFrame, SparkSession}
+import org.apache.spark.sql.{AnalysisException, SparkSession}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.catalog.BucketSpec
 import org.apache.spark.sql.catalyst.csv.CSVOptions
@@ -318,13 +318,4 @@ trait SparkShims {
   def unBase64FunctionFailsOnError(unBase64: UnBase64): Boolean = false
 
   def widerDecimalType(d1: DecimalType, d2: DecimalType): DecimalType
-
-  /** For test use only */
-  def cleanupAnyExistingSession(): Unit
-
-  /** For test use only */
-  def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan
-
-  /** For test use only */
-  def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame
 }
diff --git a/shims/spark32/src/main/scala/org/apache/gluten/sql/shims/spark32/Spark32Shims.scala b/shims/spark32/src/main/scala/org/apache/gluten/sql/shims/spark32/Spark32Shims.scala
index 84c1cdba86..baf7868f32 100644
--- a/shims/spark32/src/main/scala/org/apache/gluten/sql/shims/spark32/Spark32Shims.scala
+++ b/shims/spark32/src/main/scala/org/apache/gluten/sql/shims/spark32/Spark32Shims.scala
@@ -24,7 +24,7 @@ import org.apache.gluten.utils.ExceptionUtils
 import org.apache.spark.{ShuffleUtils, SparkContext}
 import org.apache.spark.scheduler.TaskInfo
 import org.apache.spark.shuffle.ShuffleHandle
-import org.apache.spark.sql.{AnalysisException, DataFrame, SparkSession, SparkSqlUtils}
+import org.apache.spark.sql.{AnalysisException, SparkSession}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.analysis.DecimalPrecision
 import org.apache.spark.sql.catalyst.catalog.BucketSpec
@@ -315,16 +315,4 @@ class Spark32Shims extends SparkShims {
     DecimalPrecision.widerDecimalType(d1, d2)
   }
 
-  override def cleanupAnyExistingSession(): Unit = {
-    SparkSqlUtils.cleanupAnyExistingSession()
-  }
-
-  override def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    SparkSqlUtils.getLogicalPlanFromDataFrame(df)
-  }
-
-  override def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    SparkSqlUtils.dataSetOfRows(spark, logicalPlan)
-  }
-
 }
diff --git a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala b/shims/spark32/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
similarity index 73%
copy from shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
copy to shims/spark32/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
index bb91d4dbb3..bc0fbfcfc3 100644
--- a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
+++ b/shims/spark32/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
@@ -19,17 +19,9 @@ package org.apache.spark.sql.classic
 import org.apache.spark.sql.Column
 import org.apache.spark.sql.catalyst.expressions.Expression
 
-/**
- * Just to ensure the code below works for Spark versions earlier than 4.0.
- *
- * import org.apache.spark.sql.classic.ClassicConversions._
- */
-
-trait ClassicConversions {
-
-  implicit class ColumnConstructorExt(val c: Column.type) {
-    def apply(e: Expression): Column = Column("fake")
+/** Ensures compatibility with Spark 4.0. */
+object ClassicColumn {
+  def apply(e: Expression): Column = {
+    Column(e)
   }
 }
-
-object ClassicConversions extends ClassicConversions
diff --git a/shims/spark32/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala b/shims/spark32/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
similarity index 72%
copy from shims/spark32/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
copy to shims/spark32/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
index 78a1542b47..40b1ab3c54 100644
--- a/shims/spark32/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
+++ b/shims/spark32/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
@@ -14,18 +14,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.spark.sql
+package org.apache.spark.sql.classic
 
+import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 
-object SparkSqlUtils {
-  def cleanupAnyExistingSession(): Unit = {
-    SparkSession.cleanupAnyExistingSession()
-  }
-  def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    df.logicalPlan
-  }
-  def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    Dataset.ofRows(spark, logicalPlan)
+/** Since Spark 4.0, the method ofRows cannot be invoked directly from sql.Dataset. */
+object ClassicDataset {
+  def ofRows(sparkSession: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
+    Dataset.ofRows(sparkSession, logicalPlan)
   }
 }
diff --git a/shims/spark32/src/main/scala/org/apache/spark/sql/classic/conversions.scala b/shims/spark32/src/main/scala/org/apache/spark/sql/classic/conversions.scala
index bb91d4dbb3..5a54519121 100644
--- a/shims/spark32/src/main/scala/org/apache/spark/sql/classic/conversions.scala
+++ b/shims/spark32/src/main/scala/org/apache/spark/sql/classic/conversions.scala
@@ -17,19 +17,26 @@
 package org.apache.spark.sql.classic
 
 import org.apache.spark.sql.Column
-import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.SparkSession
 
 /**
  * Just to ensure the code below works for Spark versions earlier than 4.0.
  *
  * import org.apache.spark.sql.classic.ClassicConversions._
  */
-
 trait ClassicConversions {
 
-  implicit class ColumnConstructorExt(val c: Column.type) {
-    def apply(e: Expression): Column = Column("fake")
-  }
+  implicit class ColumnConstructorExt(val c: Column.type) {}
 }
 
 object ClassicConversions extends ClassicConversions
+
+/**
+ * Just to ensure the code below works for Spark versions earlier than 4.0.
+ *
+ * import org.apache.spark.sql.classic.ExtendedClassicConversions._
+ */
+object ExtendedClassicConversions {
+
+  implicit class RichSqlSparkSession(sqlSparkSession: SparkSession.type) {}
+}
diff --git a/shims/spark33/src/main/scala/org/apache/gluten/sql/shims/spark33/Spark33Shims.scala b/shims/spark33/src/main/scala/org/apache/gluten/sql/shims/spark33/Spark33Shims.scala
index 4edd715a1e..0ad1d94e79 100644
--- a/shims/spark33/src/main/scala/org/apache/gluten/sql/shims/spark33/Spark33Shims.scala
+++ b/shims/spark33/src/main/scala/org/apache/gluten/sql/shims/spark33/Spark33Shims.scala
@@ -25,7 +25,7 @@ import org.apache.gluten.utils.ExceptionUtils
 import org.apache.spark._
 import org.apache.spark.scheduler.TaskInfo
 import org.apache.spark.shuffle.ShuffleHandle
-import org.apache.spark.sql.{AnalysisException, DataFrame, SparkSession, SparkSqlUtils}
+import org.apache.spark.sql.{AnalysisException, SparkSession}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.analysis.DecimalPrecision
 import org.apache.spark.sql.catalyst.catalog.BucketSpec
@@ -409,16 +409,4 @@ class Spark33Shims extends SparkShims {
     DecimalPrecision.widerDecimalType(d1, d2)
   }
 
-  override def cleanupAnyExistingSession(): Unit = {
-    SparkSqlUtils.cleanupAnyExistingSession()
-  }
-
-  override def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    SparkSqlUtils.getLogicalPlanFromDataFrame(df)
-  }
-
-  override def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    SparkSqlUtils.dataSetOfRows(spark, logicalPlan)
-  }
-
 }
diff --git a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala b/shims/spark33/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
similarity index 73%
copy from shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
copy to shims/spark33/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
index bb91d4dbb3..bc0fbfcfc3 100644
--- a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
+++ b/shims/spark33/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
@@ -19,17 +19,9 @@ package org.apache.spark.sql.classic
 import org.apache.spark.sql.Column
 import org.apache.spark.sql.catalyst.expressions.Expression
 
-/**
- * Just to ensure the code below works for Spark versions earlier than 4.0.
- *
- * import org.apache.spark.sql.classic.ClassicConversions._
- */
-
-trait ClassicConversions {
-
-  implicit class ColumnConstructorExt(val c: Column.type) {
-    def apply(e: Expression): Column = Column("fake")
+/** Ensures compatibility with Spark 4.0. */
+object ClassicColumn {
+  def apply(e: Expression): Column = {
+    Column(e)
   }
 }
-
-object ClassicConversions extends ClassicConversions
diff --git a/shims/spark34/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala b/shims/spark33/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
similarity index 72%
rename from shims/spark34/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
rename to shims/spark33/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
index 78a1542b47..40b1ab3c54 100644
--- a/shims/spark34/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
+++ b/shims/spark33/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
@@ -14,18 +14,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.spark.sql
+package org.apache.spark.sql.classic
 
+import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 
-object SparkSqlUtils {
-  def cleanupAnyExistingSession(): Unit = {
-    SparkSession.cleanupAnyExistingSession()
-  }
-  def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    df.logicalPlan
-  }
-  def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    Dataset.ofRows(spark, logicalPlan)
+/** Since Spark 4.0, the method ofRows cannot be invoked directly from sql.Dataset. */
+object ClassicDataset {
+  def ofRows(sparkSession: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
+    Dataset.ofRows(sparkSession, logicalPlan)
   }
 }
diff --git a/shims/spark33/src/main/scala/org/apache/spark/sql/classic/conversions.scala b/shims/spark33/src/main/scala/org/apache/spark/sql/classic/conversions.scala
index bb91d4dbb3..5a54519121 100644
--- a/shims/spark33/src/main/scala/org/apache/spark/sql/classic/conversions.scala
+++ b/shims/spark33/src/main/scala/org/apache/spark/sql/classic/conversions.scala
@@ -17,19 +17,26 @@
 package org.apache.spark.sql.classic
 
 import org.apache.spark.sql.Column
-import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.SparkSession
 
 /**
  * Just to ensure the code below works for Spark versions earlier than 4.0.
  *
  * import org.apache.spark.sql.classic.ClassicConversions._
  */
-
 trait ClassicConversions {
 
-  implicit class ColumnConstructorExt(val c: Column.type) {
-    def apply(e: Expression): Column = Column("fake")
-  }
+  implicit class ColumnConstructorExt(val c: Column.type) {}
 }
 
 object ClassicConversions extends ClassicConversions
+
+/**
+ * Just to ensure the code below works for Spark versions earlier than 4.0.
+ *
+ * import org.apache.spark.sql.classic.ExtendedClassicConversions._
+ */
+object ExtendedClassicConversions {
+
+  implicit class RichSqlSparkSession(sqlSparkSession: SparkSession.type) {}
+}
diff --git a/shims/spark34/src/main/scala/org/apache/gluten/sql/shims/spark34/Spark34Shims.scala b/shims/spark34/src/main/scala/org/apache/gluten/sql/shims/spark34/Spark34Shims.scala
index efbfbf46cc..16affb2ad8 100644
--- a/shims/spark34/src/main/scala/org/apache/gluten/sql/shims/spark34/Spark34Shims.scala
+++ b/shims/spark34/src/main/scala/org/apache/gluten/sql/shims/spark34/Spark34Shims.scala
@@ -27,7 +27,7 @@ import org.apache.spark.internal.io.FileCommitProtocol
 import org.apache.spark.paths.SparkPath
 import org.apache.spark.scheduler.TaskInfo
 import org.apache.spark.shuffle.ShuffleHandle
-import org.apache.spark.sql.{AnalysisException, DataFrame, SparkSession, SparkSqlUtils}
+import org.apache.spark.sql.{AnalysisException, SparkSession}
 import org.apache.spark.sql.catalyst.InternalRow
 import org.apache.spark.sql.catalyst.analysis.DecimalPrecision
 import org.apache.spark.sql.catalyst.catalog.BucketSpec
@@ -647,16 +647,4 @@ class Spark34Shims extends SparkShims {
     DecimalPrecision.widerDecimalType(d1, d2)
   }
 
-  override def cleanupAnyExistingSession(): Unit = {
-    SparkSqlUtils.cleanupAnyExistingSession()
-  }
-
-  override def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    SparkSqlUtils.getLogicalPlanFromDataFrame(df)
-  }
-
-  override def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    SparkSqlUtils.dataSetOfRows(spark, logicalPlan)
-  }
-
 }
diff --git a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala b/shims/spark34/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
similarity index 73%
copy from shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
copy to shims/spark34/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
index bb91d4dbb3..bc0fbfcfc3 100644
--- a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
+++ b/shims/spark34/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
@@ -19,17 +19,9 @@ package org.apache.spark.sql.classic
 import org.apache.spark.sql.Column
 import org.apache.spark.sql.catalyst.expressions.Expression
 
-/**
- * Just to ensure the code below works for Spark versions earlier than 4.0.
- *
- * import org.apache.spark.sql.classic.ClassicConversions._
- */
-
-trait ClassicConversions {
-
-  implicit class ColumnConstructorExt(val c: Column.type) {
-    def apply(e: Expression): Column = Column("fake")
+/** Ensures compatibility with Spark 4.0. */
+object ClassicColumn {
+  def apply(e: Expression): Column = {
+    Column(e)
   }
 }
-
-object ClassicConversions extends ClassicConversions
diff --git a/shims/spark33/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala b/shims/spark34/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
similarity index 72%
rename from shims/spark33/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
rename to shims/spark34/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
index 78a1542b47..40b1ab3c54 100644
--- a/shims/spark33/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
+++ b/shims/spark34/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
@@ -14,18 +14,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.spark.sql
+package org.apache.spark.sql.classic
 
+import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 
-object SparkSqlUtils {
-  def cleanupAnyExistingSession(): Unit = {
-    SparkSession.cleanupAnyExistingSession()
-  }
-  def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    df.logicalPlan
-  }
-  def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    Dataset.ofRows(spark, logicalPlan)
+/** Since Spark 4.0, the method ofRows cannot be invoked directly from sql.Dataset. */
+object ClassicDataset {
+  def ofRows(sparkSession: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
+    Dataset.ofRows(sparkSession, logicalPlan)
   }
 }
diff --git a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala b/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
index bb91d4dbb3..5a54519121 100644
--- a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
+++ b/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
@@ -17,19 +17,26 @@
 package org.apache.spark.sql.classic
 
 import org.apache.spark.sql.Column
-import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.SparkSession
 
 /**
  * Just to ensure the code below works for Spark versions earlier than 4.0.
  *
  * import org.apache.spark.sql.classic.ClassicConversions._
  */
-
 trait ClassicConversions {
 
-  implicit class ColumnConstructorExt(val c: Column.type) {
-    def apply(e: Expression): Column = Column("fake")
-  }
+  implicit class ColumnConstructorExt(val c: Column.type) {}
 }
 
 object ClassicConversions extends ClassicConversions
+
+/**
+ * Just to ensure the code below works for Spark versions earlier than 4.0.
+ *
+ * import org.apache.spark.sql.classic.ExtendedClassicConversions._
+ */
+object ExtendedClassicConversions {
+
+  implicit class RichSqlSparkSession(sqlSparkSession: SparkSession.type) {}
+}
diff --git a/shims/spark35/src/main/scala/org/apache/gluten/sql/shims/spark35/Spark35Shims.scala b/shims/spark35/src/main/scala/org/apache/gluten/sql/shims/spark35/Spark35Shims.scala
index 4b47f0b52e..e48a401722 100644
--- a/shims/spark35/src/main/scala/org/apache/gluten/sql/shims/spark35/Spark35Shims.scala
+++ b/shims/spark35/src/main/scala/org/apache/gluten/sql/shims/spark35/Spark35Shims.scala
@@ -27,7 +27,7 @@ import org.apache.spark.internal.io.FileCommitProtocol
 import org.apache.spark.paths.SparkPath
 import org.apache.spark.scheduler.TaskInfo
 import org.apache.spark.shuffle.ShuffleHandle
-import org.apache.spark.sql.{AnalysisException, DataFrame, SparkSession, SparkSqlUtils}
+import org.apache.spark.sql.{AnalysisException, SparkSession}
 import org.apache.spark.sql.catalyst.{ExtendedAnalysisException, InternalRow}
 import org.apache.spark.sql.catalyst.analysis.DecimalPrecision
 import org.apache.spark.sql.catalyst.catalog.BucketSpec
@@ -707,16 +707,4 @@ class Spark35Shims extends SparkShims {
     DecimalPrecision.widerDecimalType(d1, d2)
   }
 
-  override def cleanupAnyExistingSession(): Unit = {
-    SparkSqlUtils.cleanupAnyExistingSession()
-  }
-
-  override def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    SparkSqlUtils.getLogicalPlanFromDataFrame(df)
-  }
-
-  override def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    SparkSqlUtils.dataSetOfRows(spark, logicalPlan)
-  }
-
 }
diff --git a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala b/shims/spark35/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
similarity index 73%
copy from shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
copy to shims/spark35/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
index bb91d4dbb3..bc0fbfcfc3 100644
--- a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
+++ b/shims/spark35/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
@@ -19,17 +19,9 @@ package org.apache.spark.sql.classic
 import org.apache.spark.sql.Column
 import org.apache.spark.sql.catalyst.expressions.Expression
 
-/**
- * Just to ensure the code below works for Spark versions earlier than 4.0.
- *
- * import org.apache.spark.sql.classic.ClassicConversions._
- */
-
-trait ClassicConversions {
-
-  implicit class ColumnConstructorExt(val c: Column.type) {
-    def apply(e: Expression): Column = Column("fake")
+/** Ensures compatibility with Spark 4.0. */
+object ClassicColumn {
+  def apply(e: Expression): Column = {
+    Column(e)
   }
 }
-
-object ClassicConversions extends ClassicConversions
diff --git a/shims/spark35/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala b/shims/spark35/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
similarity index 72%
rename from shims/spark35/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
rename to shims/spark35/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
index 78a1542b47..40b1ab3c54 100644
--- a/shims/spark35/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
+++ b/shims/spark35/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
@@ -14,18 +14,14 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.spark.sql
+package org.apache.spark.sql.classic
 
+import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
 
-object SparkSqlUtils {
-  def cleanupAnyExistingSession(): Unit = {
-    SparkSession.cleanupAnyExistingSession()
-  }
-  def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    df.logicalPlan
-  }
-  def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    Dataset.ofRows(spark, logicalPlan)
+/** Since Spark 4.0, the method ofRows cannot be invoked directly from sql.Dataset. */
+object ClassicDataset {
+  def ofRows(sparkSession: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
+    Dataset.ofRows(sparkSession, logicalPlan)
   }
 }
diff --git a/shims/spark35/src/main/scala/org/apache/spark/sql/classic/conversions.scala b/shims/spark35/src/main/scala/org/apache/spark/sql/classic/conversions.scala
index bb91d4dbb3..5a54519121 100644
--- a/shims/spark35/src/main/scala/org/apache/spark/sql/classic/conversions.scala
+++ b/shims/spark35/src/main/scala/org/apache/spark/sql/classic/conversions.scala
@@ -17,19 +17,26 @@
 package org.apache.spark.sql.classic
 
 import org.apache.spark.sql.Column
-import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.SparkSession
 
 /**
  * Just to ensure the code below works for Spark versions earlier than 4.0.
  *
  * import org.apache.spark.sql.classic.ClassicConversions._
  */
-
 trait ClassicConversions {
 
-  implicit class ColumnConstructorExt(val c: Column.type) {
-    def apply(e: Expression): Column = Column("fake")
-  }
+  implicit class ColumnConstructorExt(val c: Column.type) {}
 }
 
 object ClassicConversions extends ClassicConversions
+
+/**
+ * Just to ensure the code below works for Spark versions earlier than 4.0.
+ *
+ * import org.apache.spark.sql.classic.ExtendedClassicConversions._
+ */
+object ExtendedClassicConversions {
+
+  implicit class RichSqlSparkSession(sqlSparkSession: SparkSession.type) {}
+}
diff --git a/shims/spark40/src/main/scala/org/apache/gluten/sql/shims/spark40/Spark40Shims.scala b/shims/spark40/src/main/scala/org/apache/gluten/sql/shims/spark40/Spark40Shims.scala
index 2b20e44b27..f24d489929 100644
--- a/shims/spark40/src/main/scala/org/apache/gluten/sql/shims/spark40/Spark40Shims.scala
+++ b/shims/spark40/src/main/scala/org/apache/gluten/sql/shims/spark40/Spark40Shims.scala
@@ -27,7 +27,7 @@ import org.apache.spark.internal.io.FileCommitProtocol
 import org.apache.spark.paths.SparkPath
 import org.apache.spark.scheduler.TaskInfo
 import org.apache.spark.shuffle.ShuffleHandle
-import org.apache.spark.sql.{AnalysisException, DataFrame, SparkSession, SparkSqlUtils}
+import org.apache.spark.sql.{AnalysisException, SparkSession}
 import org.apache.spark.sql.catalyst.{ExtendedAnalysisException, InternalRow}
 import org.apache.spark.sql.catalyst.analysis.DecimalPrecisionTypeCoercion
 import org.apache.spark.sql.catalyst.catalog.BucketSpec
@@ -696,15 +696,4 @@ class Spark40Shims extends SparkShims {
     DecimalPrecisionTypeCoercion.widerDecimalType(d1, d2)
   }
 
-  override def cleanupAnyExistingSession(): Unit = {
-    SparkSqlUtils.cleanupAnyExistingSession()
-  }
-
-  override def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    SparkSqlUtils.getLogicalPlanFromDataFrame(df)
-  }
-
-  def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    SparkSqlUtils.dataSetOfRows(spark, logicalPlan)
-  }
 }
diff --git a/shims/spark40/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala b/shims/spark40/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
deleted file mode 100644
index dc64f905b4..0000000000
--- a/shims/spark40/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
+++ /dev/null
@@ -1,31 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with
- * this work for additional information regarding copyright ownership.
- * The ASF licenses this file to You under the Apache License, Version 2.0
- * (the "License"); you may not use this file except in compliance with
- * the License.  You may obtain a copy of the License at
- *
- *    http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.spark.sql
-
-import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
-
-object SparkSqlUtils {
-  def cleanupAnyExistingSession(): Unit = {
-    classic.SparkSession.cleanupAnyExistingSession()
-  }
-  def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    df.asInstanceOf[classic.Dataset[Row]].logicalPlan
-  }
-  def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    classic.Dataset.ofRows(spark.asInstanceOf[classic.SparkSession], logicalPlan)
-  }
-}
diff --git a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala b/shims/spark40/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
similarity index 61%
copy from shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
copy to shims/spark40/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
index bb91d4dbb3..7fe63d706e 100644
--- a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
+++ b/shims/spark40/src/main/scala/org/apache/spark/sql/classic/ClassicColumn.scala
@@ -18,18 +18,17 @@ package org.apache.spark.sql.classic
 
 import org.apache.spark.sql.Column
 import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.classic.ClassicConversions._
 
 /**
- * Just to ensure the code below works for Spark versions earlier than 4.0.
- *
- * import org.apache.spark.sql.classic.ClassicConversions._
+ * Ensures compatibility with Spark 4.0. The implicit class ColumnConstructorExt from
+ * ClassicConversions is used to construct a Column from an Expression. Since Spark 4.0, the Column
+ * class is private to the package org.apache.spark. This class provides a way to construct a Column
+ * in code that is outside the org.apache.spark package. Developers can directly call Column(e) if
+ * ColumnConstructorExt is imported and the caller code is within this package.
  */
-
-trait ClassicConversions {
-
-  implicit class ColumnConstructorExt(val c: Column.type) {
-    def apply(e: Expression): Column = Column("fake")
+object ClassicColumn {
+  def apply(e: Expression): Column = {
+    Column(e)
   }
 }
-
-object ClassicConversions extends ClassicConversions
diff --git a/shims/spark32/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala b/shims/spark40/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
similarity index 69%
rename from shims/spark32/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
rename to shims/spark40/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
index 78a1542b47..1a48a8e110 100644
--- a/shims/spark32/src/main/scala/org/apache/spark/sql/SparkSqlUtils.scala
+++ b/shims/spark40/src/main/scala/org/apache/spark/sql/classic/ClassicDataset.scala
@@ -14,18 +14,15 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.spark.sql
+package org.apache.spark.sql.classic
 
 import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
+import org.apache.spark.sql.classic
 
-object SparkSqlUtils {
-  def cleanupAnyExistingSession(): Unit = {
-    SparkSession.cleanupAnyExistingSession()
-  }
-  def getLogicalPlanFromDataFrame(df: DataFrame): LogicalPlan = {
-    df.logicalPlan
-  }
-  def dataSetOfRows(spark: SparkSession, logicalPlan: LogicalPlan): DataFrame = {
-    Dataset.ofRows(spark, logicalPlan)
+/** Since Spark 4.0, the method ofRows cannot be invoked directly from sql.Dataset. */
+object ClassicDataset {
+  def ofRows(sparkSession: classic.SparkSession, logicalPlan: LogicalPlan): DataFrame = {
+    // Redirect to the classic.Dataset companion method.
+    classic.Dataset.ofRows(sparkSession, logicalPlan)
   }
 }
diff --git a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala b/shims/spark40/src/main/scala/org/apache/spark/sql/classic/conversions.scala
similarity index 62%
copy from shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
copy to shims/spark40/src/main/scala/org/apache/spark/sql/classic/conversions.scala
index bb91d4dbb3..b168838f05 100644
--- a/shims/spark34/src/main/scala/org/apache/spark/sql/classic/conversions.scala
+++ b/shims/spark40/src/main/scala/org/apache/spark/sql/classic/conversions.scala
@@ -16,20 +16,18 @@
  */
 package org.apache.spark.sql.classic
 
-import org.apache.spark.sql.Column
-import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql
 
 /**
- * Just to ensure the code below works for Spark versions earlier than 4.0.
- *
- * import org.apache.spark.sql.classic.ClassicConversions._
+ * Enables access to the methods in the companion object classic.SparkSession via sql.SparkSession.
+ * Since Spark 4.0, these methods have been moved from sql.SparkSession to sql.classic.SparkSession.
  */
+object ExtendedClassicConversions {
 
-trait ClassicConversions {
-
-  implicit class ColumnConstructorExt(val c: Column.type) {
-    def apply(e: Expression): Column = Column("fake")
+  implicit class RichSqlSparkSession(sqlSparkSession: sql.SparkSession.type) {
+    def cleanupAnyExistingSession(): Unit = {
+      // Redirect to the classic.SparkSession companion method.
+      sql.classic.SparkSession.cleanupAnyExistingSession()
+    }
   }
 }
-
-object ClassicConversions extends ClassicConversions
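
A side note on the `locally { new ColumnConstructorExt(Column) }` blocks this
patch introduces: on Spark versions where the implicit class contributes no
members, nothing else in the file references the wildcard import, so builds
that treat unused imports as errors would reject it. Referencing the class
once keeps the import alive on every version. A minimal standalone sketch of
the pattern (the object name is illustrative, not from the patch):

    import org.apache.spark.sql.Column
    import org.apache.spark.sql.classic.ClassicConversions._

    object UnusedImportKeepAlive {
      // Instantiating the implicit class marks the import as used, even on
      // versions where ColumnConstructorExt declares no methods; the TODOs
      // in the patch note this should go away once the unused-import error
      // can be suppressed directly.
      locally {
        new ColumnConstructorExt(Column)
      }
    }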



