This is an automated email from the ASF dual-hosted git repository.

philo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git


The following commit(s) were added to refs/heads/main by this push:
     new 06bf90e89 [Minor] Move a test from spark-3.2 module to a common test module (#6585)
06bf90e89 is described below

commit 06bf90e89911030348af4905283165ed3b811eb8
Author: PHILO-HE <[email protected]>
AuthorDate: Fri Jul 26 13:18:40 2024 +0800

    [Minor] Move a test from spark-3.2 module to a common test module (#6585)
---
 .../org/apache/spark/sql/GlutenSQLQuerySuite.scala | 20 -------
 .../org/apache/gluten/sql/SQLQuerySuite.scala      | 66 ++++++++++++++++++++++
 2 files changed, 66 insertions(+), 20 deletions(-)

diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQuerySuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQuerySuite.scala
index e5962ea14..424fd1b3e 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQuerySuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQuerySuite.scala
@@ -16,8 +16,6 @@
  */
 package org.apache.spark.sql
 
-import org.apache.gluten.GlutenConfig
-
 import org.apache.spark.SparkException
 import org.apache.spark.sql.execution.columnar.InMemoryTableScanExec
 import org.apache.spark.sql.internal.SQLConf
@@ -72,24 +70,6 @@ class GlutenSQLQuerySuite extends SQLQuerySuite with GlutenSQLTestsTrait {
     }
   }
 
-  testGluten("Support run with Vector reader in FileSourceScan or BatchScan") {
-    withSQLConf(
-      SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true",
-      SQLConf.CACHE_VECTORIZED_READER_ENABLED.key -> "true",
-      GlutenConfig.COLUMNAR_BATCHSCAN_ENABLED.key -> "false",
-      GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key -> "false"
-    ) {
-      withTable("t1") {
-        sql("""CREATE TABLE t1(name STRING, id BINARY, part BINARY)
-              |USING PARQUET PARTITIONED BY (part)""".stripMargin)
-        sql("INSERT INTO t1 PARTITION(part = 'Spark SQL') VALUES('a', X'537061726B2053514C')")
-        checkAnswer(
-          sql("SELECT name, cast(id as string), cast(part as string) FROM t1"),
-          Row("a", "Spark SQL", "Spark SQL"))
-      }
-    }
-  }
-
   testGluten("SPARK-33593: Vector reader got incorrect data with binary partition value") {
     Seq("false").foreach(
       value => {
diff --git a/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala b/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala
new file mode 100644
index 000000000..39b9ee33b
--- /dev/null
+++ b/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala
@@ -0,0 +1,66 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.gluten.sql
+
+import org.apache.gluten.GlutenConfig
+import org.apache.gluten.execution.WholeStageTransformerSuite
+import org.apache.gluten.utils.BackendTestUtils
+import org.apache.gluten.utils.SystemParameters
+
+import org.apache.spark.SparkConf
+import org.apache.spark.sql.Row
+import org.apache.spark.sql.internal.SQLConf
+
+class SQLQuerySuite extends WholeStageTransformerSuite {
+  protected val resourcePath: String = null
+  protected val fileFormat: String = null
+  override protected val logLevel: String = "INFO"
+
+  override protected def sparkConf: SparkConf = {
+    val conf = super.sparkConf
+      .set("spark.plugins", "org.apache.gluten.GlutenPlugin")
+      .set("spark.default.parallelism", "1")
+      .set("spark.memory.offHeap.enabled", "true")
+      .set("spark.memory.offHeap.size", "1024MB")
+      .set("spark.ui.enabled", "false")
+      .set("spark.gluten.ui.enabled", "false")
+    if (BackendTestUtils.isCHBackendLoaded()) {
+      conf
+        .set("spark.gluten.sql.enable.native.validation", "false")
+        .set(GlutenConfig.GLUTEN_LIB_PATH, SystemParameters.getClickHouseLibPath)
+    }
+    conf
+  }
+
+  test("Support run with Vector reader in FileSourceScan or BatchScan") {
+    withSQLConf(
+      SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true",
+      SQLConf.CACHE_VECTORIZED_READER_ENABLED.key -> "true",
+      GlutenConfig.COLUMNAR_BATCHSCAN_ENABLED.key -> "false",
+      GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key -> "false"
+    ) {
+      withTable("t1") {
+        sql("""CREATE TABLE t1(name STRING, id BINARY, part BINARY)
+              |USING PARQUET PARTITIONED BY (part)""".stripMargin)
+        sql("INSERT INTO t1 PARTITION(part = 'Spark SQL') VALUES('a', X'537061726B2053514C')")
+        checkAnswer(
+          sql("SELECT name, cast(id as string), cast(part as string) FROM t1"),
+          Row("a", "Spark SQL", "Spark SQL"))
+      }
+    }
+  }
+}


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to