This is an automated email from the ASF dual-hosted git repository.
philo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git
The following commit(s) were added to refs/heads/main by this push:
new eff5de6ff [GLUTEN-6997][VL] Ignore a test: cleanup file if job failed (#6965)
eff5de6ff is described below
commit eff5de6ff6ac0c65dd0106b75374b79647b5bf40
Author: PHILO-HE <[email protected]>
AuthorDate: Tue Aug 27 11:23:20 2024 +0800
[GLUTEN-6997][VL] Ignore a test: cleanup file if job failed (#6965)
---
.../org/apache/spark/sql/sources/GlutenInsertSuite.scala | 13 ++++++++-----
.../org/apache/spark/sql/sources/GlutenInsertSuite.scala | 13 ++++++++-----
2 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
index ca0ada39c..3c334511a 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
@@ -114,16 +114,19 @@ class GlutenInsertSuite
}
}
- testGluten("Cleanup staging files if job is failed") {
- withTable("t") {
- spark.sql("CREATE TABLE t (c1 int, c2 string) USING PARQUET")
- val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
+ ignoreGluten("Cleanup staging files if job failed") {
+ // Using a unique table name in this test. Sometimes, the table is not removed for some unknown
+ // reason, which can cause test failure (location already exists) if other following tests have
+ // the same table name.
+ withTable("tbl") {
+ spark.sql("CREATE TABLE tbl (c1 int, c2 string) USING PARQUET")
+ val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
assert(new File(table.location).list().length == 0)
intercept[Exception] {
spark.sql(
"""
- |INSERT INTO TABLE t
+ |INSERT INTO TABLE tbl
|SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
|""".stripMargin
)
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
index 084c2faa8..3d9d8842f 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
@@ -116,16 +116,19 @@ class GlutenInsertSuite
}
}
- testGluten("Cleanup staging files if job is failed") {
- withTable("t") {
- spark.sql("CREATE TABLE t (c1 int, c2 string) USING PARQUET")
- val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("t"))
+ ignoreGluten("Cleanup staging files if job failed") {
+ // Using a unique table name in this test. Sometimes, the table is not removed for some unknown
+ // reason, which can cause test failure (location already exists) if other following tests have
+ // the same table name.
+ withTable("tbl") {
+ spark.sql("CREATE TABLE tbl (c1 int, c2 string) USING PARQUET")
+ val table = spark.sessionState.catalog.getTableMetadata(TableIdentifier("tbl"))
assert(new File(table.location).list().length == 0)
intercept[Exception] {
spark.sql(
"""
- |INSERT INTO TABLE t
+ |INSERT INTO TABLE tbl
|SELECT id, assert_true(SPARK_PARTITION_ID() = 1) FROM range(1, 3, 1, 2)
|""".stripMargin
)
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]