This is an automated email from the ASF dual-hosted git repository.
ulyssesyou pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git
The following commit(s) were added to refs/heads/main by this push:
     new 34ea806b6 [VL] Add InsertIntoHadoopFsRelationCommand test case for csv format (#5681)
34ea806b6 is described below
commit 34ea806b67c9f0d8692cb105b6f23c25ef202a7f
Author: Joey <[email protected]>
AuthorDate: Sat May 11 09:21:34 2024 +0800
[VL] Add InsertIntoHadoopFsRelationCommand test case for csv format (#5681)
---
.../org/apache/gluten/execution/TestOperator.scala | 17 +++++++++++++++++
.../gluten/execution/WholeStageTransformerSuite.scala | 4 +++-
2 files changed, 20 insertions(+), 1 deletion(-)
diff --git a/backends-velox/src/test/scala/org/apache/gluten/execution/TestOperator.scala b/backends-velox/src/test/scala/org/apache/gluten/execution/TestOperator.scala
index 920b8e2bd..b69223be1 100644
--- a/backends-velox/src/test/scala/org/apache/gluten/execution/TestOperator.scala
+++ b/backends-velox/src/test/scala/org/apache/gluten/execution/TestOperator.scala
@@ -515,6 +515,23 @@ class TestOperator extends VeloxWholeStageTransformerSuite {
}
}
+ test("insert into select from csv") {
+ withTable("insert_csv_t") {
+ val filePath = rootPath + "/datasource/csv/student.csv"
+ val df = spark.read
+ .format("csv")
+ .option("header", "true")
+ .load(filePath)
+ df.createOrReplaceTempView("student")
+ spark.sql("create table insert_csv_t(Name string, Language string) using parquet;")
+ runQueryAndCompare("""
+ |insert into insert_csv_t select * from student;
+ |""".stripMargin) {
+ checkGlutenOperatorMatch[ArrowFileSourceScanExec]
+ }
+ }
+ }
+
test("test OneRowRelation") {
val df = sql("SELECT 1")
checkAnswer(df, Row(1))
diff --git a/gluten-core/src/test/scala/org/apache/gluten/execution/WholeStageTransformerSuite.scala b/gluten-core/src/test/scala/org/apache/gluten/execution/WholeStageTransformerSuite.scala
index 67c12a6f7..c52002b68 100644
--- a/gluten-core/src/test/scala/org/apache/gluten/execution/WholeStageTransformerSuite.scala
+++ b/gluten-core/src/test/scala/org/apache/gluten/execution/WholeStageTransformerSuite.scala
@@ -22,7 +22,7 @@ import org.apache.gluten.utils.{Arm, FallbackUtil}
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, GlutenQueryTest, Row}
-import org.apache.spark.sql.execution.SparkPlan
+import org.apache.spark.sql.execution.{CommandResultExec, SparkPlan}
import org.apache.spark.sql.execution.adaptive.{AdaptiveSparkPlanExec, AdaptiveSparkPlanHelper, ShuffleQueryStageExec}
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.DoubleType
@@ -222,6 +222,8 @@ abstract class WholeStageTransformerSuite
df.queryExecution.executedPlan match {
case exec: AdaptiveSparkPlanExec =>
getChildrenPlan(Seq(exec.executedPlan))
+ case cmd: CommandResultExec =>
+ getChildrenPlan(Seq(cmd.commandPhysicalPlan))
case plan =>
getChildrenPlan(Seq(plan))
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]