This is an automated email from the ASF dual-hosted git repository.

lwz9103 pushed a commit to branch liquid
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git

commit 904ef0e8ce949e67691eb6fa275b4da67183a4fd
Author: lwz9103 <[email protected]>
AuthorDate: Mon Apr 28 15:43:02 2025 +0800

    [CH] Fix build, see https://github.com/apache/incubator-gluten/pull/9139 for details
---
 .../org/apache/gluten/utils/clickhouse/ClickHouseTestSettings.scala | 6 ++++--
 .../scala/org/apache/gluten/sql/shims/spark33/Spark33Shims.scala    | 6 +++++-
 2 files changed, 9 insertions(+), 3 deletions(-)

diff --git a/gluten-ut/spark33/src/test/scala/org/apache/gluten/utils/clickhouse/ClickHouseTestSettings.scala b/gluten-ut/spark33/src/test/scala/org/apache/gluten/utils/clickhouse/ClickHouseTestSettings.scala
index de3a42a5d0..482b15ab03 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/gluten/utils/clickhouse/ClickHouseTestSettings.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/gluten/utils/clickhouse/ClickHouseTestSettings.scala
@@ -146,8 +146,10 @@ class ClickHouseTestSettings extends BackendTestSettings {
     .exclude("should move field up one level of nesting")
     .exclude("SPARK-36778: add ilike API for scala")
   enableSuite[GlutenComplexTypesSuite]
-  enableSuite[GlutenConfigBehaviorSuite].exclude(
-    "SPARK-22160 spark.sql.execution.rangeExchange.sampleSizePerPartition")
+  enableSuite[GlutenConfigBehaviorSuite]
+    .exclude("SPARK-22160 
spark.sql.execution.rangeExchange.sampleSizePerPartition")
+    // Gluten columnar operator will have different number of jobs
+    .exclude("SPARK-40211: customize initialNumPartitions for take")
   enableSuite[GlutenCountMinSketchAggQuerySuite]
   enableSuite[GlutenCsvFunctionsSuite]
   enableSuite[GlutenDSV2CharVarcharTestSuite]
diff --git a/shims/spark33/src/main/scala/org/apache/gluten/sql/shims/spark33/Spark33Shims.scala b/shims/spark33/src/main/scala/org/apache/gluten/sql/shims/spark33/Spark33Shims.scala
index e7c6e0cd08..8fc877fe65 100644
--- a/shims/spark33/src/main/scala/org/apache/gluten/sql/shims/spark33/Spark33Shims.scala
+++ b/shims/spark33/src/main/scala/org/apache/gluten/sql/shims/spark33/Spark33Shims.scala
@@ -40,7 +40,7 @@ import org.apache.spark.sql.catalyst.util.RebaseDateTime.RebaseSpec
 import org.apache.spark.sql.catalyst.util.TimestampFormatter
 import org.apache.spark.sql.connector.catalog.Table
 import org.apache.spark.sql.connector.expressions.Transform
-import org.apache.spark.sql.execution.{FileSourceScanExec, GlobalLimitExec, PartitionedFileUtil, SparkPlan, TakeOrderedAndProjectExec}
+import org.apache.spark.sql.execution.{CollectLimitExec, FileSourceScanExec, GlobalLimitExec, PartitionedFileUtil, SparkPlan, TakeOrderedAndProjectExec}
 import org.apache.spark.sql.execution.datasources._
 import org.apache.spark.sql.execution.datasources.FileFormatWriter.Empty2Null
 import org.apache.spark.sql.execution.datasources.parquet.ParquetFilters
@@ -360,6 +360,10 @@ class Spark33Shims extends SparkShims {
     }
   }
 
+  override def getCollectLimitOffset(plan: CollectLimitExec): Int = {
+    plan.offset
+  }
+
   override def supportsRowBased(plan: SparkPlan): Boolean = plan.supportsRowBased
 
   override def dateTimestampFormatInReadIsDefaultValue(


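For context, a minimal sketch of the shim pattern behind the new getCollectLimitOffset override. Only the method name and the plan.offset access come from the diff above; the trait, class, and caller names here (SparkShimsLike, Spark33ShimsLike, offsetOf) are hypothetical illustrations, not Gluten's actual API.

import org.apache.spark.sql.execution.CollectLimitExec

// Hedged sketch, not Gluten source: CollectLimitExec exposes an `offset`
// field on the Spark version this shim targets (as the diff above relies on),
// so version-specific access is routed through a shim trait instead of
// appearing in version-agnostic code.
object ShimSketch {
  trait SparkShimsLike {
    def getCollectLimitOffset(plan: CollectLimitExec): Int
  }

  // Spark 3.3 flavour, mirroring the override added in this commit.
  class Spark33ShimsLike extends SparkShimsLike {
    override def getCollectLimitOffset(plan: CollectLimitExec): Int = plan.offset
  }

  // Hypothetical caller: shared code asks the shim for the offset rather than
  // touching plan.offset directly, which may not exist on older Spark versions.
  def offsetOf(shims: SparkShimsLike, plan: CollectLimitExec): Int =
    shims.getCollectLimitOffset(plan)
}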
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
