This is an automated email from the ASF dual-hosted git repository.

philo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git


The following commit(s) were added to refs/heads/main by this push:
     new dba95de5c [UT] Add ignoreGluten method (#5553)
dba95de5c is described below

commit dba95de5c5d70cfb7c4355869c32ca82eff1ba02
Author: PHILO-HE <[email protected]>
AuthorDate: Sun Apr 28 13:49:51 2024 +0800

    [UT] Add ignoreGluten method (#5553)
---
 cpp/velox/compute/WholeStageResultIterator.cc      |   4 +-
 .../substrait/expression/ExpressionBuilder.java    |   3 +-
 .../org/apache/gluten/extension/GlutenPlan.scala   |   2 +-
 .../extension/columnar/TransformHintRule.scala     |   2 +-
 .../spark/source/GlutenIcebergSourceUtil.scala     |   7 +-
 .../apache/spark/sql/GlutenSQLTestsBaseTrait.scala |   6 +
 .../apache/spark/sql/GlutenTestsCommonTrait.scala  |   6 +
 .../sql/GlutenDynamicPartitionPruningSuite.scala   |   3 +-
 .../velox/VeloxAdaptiveQueryExecSuite.scala        | 144 +++++++++++----------
 .../spark/sql/GlutenDataFrameAggregateSuite.scala  |   3 +-
 .../sql/GlutenDynamicPartitionPruningSuite.scala   |   3 +-
 .../spark/sql/GlutenDataFrameAggregateSuite.scala  |   3 +-
 .../sql/GlutenDynamicPartitionPruningSuite.scala   |   3 +-
 .../parquet/GlutenParquetFilterSuite.scala         |   2 +-
 .../spark/sql/sources/GlutenInsertSuite.scala      |   3 +-
 .../spark/sql/GlutenDataFrameAggregateSuite.scala  |   3 +-
 .../sql/GlutenDynamicPartitionPruningSuite.scala   |   3 +-
 .../parquet/GlutenParquetFilterSuite.scala         |   2 +-
 .../spark/sql/sources/GlutenInsertSuite.scala      |   3 +-
 19 files changed, 106 insertions(+), 99 deletions(-)

diff --git a/cpp/velox/compute/WholeStageResultIterator.cc b/cpp/velox/compute/WholeStageResultIterator.cc
index 63f736dcf..b0dac1d65 100644
--- a/cpp/velox/compute/WholeStageResultIterator.cc
+++ b/cpp/velox/compute/WholeStageResultIterator.cc
@@ -486,7 +486,7 @@ std::unordered_map<std::string, std::string> WholeStageResultIterator::getQueryC
     configs[velox::core::QueryConfig::kSessionTimezone] =
         veloxCfg_->get<std::string>(kSessionTimezone, defaultTimezone);
     // Adjust timestamp according to the above configured session timezone.
-    configs[velox::core::QueryConfig::kAdjustTimestampToTimezone] = std::to_string(true);
+    configs[velox::core::QueryConfig::kAdjustTimestampToTimezone] = "true";
     // Align Velox size function with Spark.
     configs[velox::core::QueryConfig::kSparkLegacySizeOfNull] = std::to_string(veloxCfg_->get<bool>(kLegacySize, true));
 
@@ -505,7 +505,7 @@ std::unordered_map<std::string, std::string> WholeStageResultIterator::getQueryC
      configs[velox::core::QueryConfig::kAbandonPartialAggregationMinRows] =
          std::to_string(veloxCfg_->get<int32_t>(kAbandonPartialAggregationMinRows, 100000));
       // Spark's collect_set ignore nulls.
-      configs[velox::core::QueryConfig::kPrestoArrayAggIgnoreNulls] = std::to_string(true);
+      configs[velox::core::QueryConfig::kPrestoArrayAggIgnoreNulls] = "true";
     }
     // Spill configs
     if (spillStrategy_ == "none") {
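
Side note on the two hunks above: C++'s std::to_string has no bool overload, so
std::to_string(true) promotes the bool to int and yields "1", not "true"; the
patch pins the exact literal instead. A rough Scala sketch of the same idea,
building a string-keyed config map the way these Velox query configs are
assembled (the key names here are illustrative, not Velox's actual constants):

    object ConfigSketch {
      // Sketch only: boolean configs serialized as literal "true"/"false"
      // strings in a string->string map, mirroring the C++ fix above.
      // In Scala, true.toString is already "true", whereas the replaced
      // C++ std::to_string(true) would have produced "1".
      val queryConfigs: Map[String, String] = Map(
        "adjust_timestamp_to_session_timezone" -> true.toString, // "true"
        "presto.array_agg.ignore_nulls" -> String.valueOf(true) // "true"
      )
    }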
diff --git a/gluten-core/src/main/java/org/apache/gluten/substrait/expression/ExpressionBuilder.java b/gluten-core/src/main/java/org/apache/gluten/substrait/expression/ExpressionBuilder.java
index 8345ce04b..5d106938c 100644
--- a/gluten-core/src/main/java/org/apache/gluten/substrait/expression/ExpressionBuilder.java
+++ b/gluten-core/src/main/java/org/apache/gluten/substrait/expression/ExpressionBuilder.java
@@ -16,6 +16,7 @@
  */
 package org.apache.gluten.substrait.expression;
 
+import org.apache.gluten.exception.GlutenNotSupportException;
 import org.apache.gluten.expression.ConverterUtils;
 import org.apache.gluten.substrait.type.*;
 
@@ -204,7 +205,7 @@ public class ExpressionBuilder {
     if (typeNode instanceof StructNode) {
       return makeStructLiteral((InternalRow) obj, typeNode);
     }
-    throw new UnsupportedOperationException(
+    throw new GlutenNotSupportException(
         String.format(
             "Type not supported: %s, obj: %s, class: %s",
             typeNode.toString(), obj.toString(), obj.getClass().toString()));
diff --git a/gluten-core/src/main/scala/org/apache/gluten/extension/GlutenPlan.scala b/gluten-core/src/main/scala/org/apache/gluten/extension/GlutenPlan.scala
index b5071841f..85901d21d 100644
--- a/gluten-core/src/main/scala/org/apache/gluten/extension/GlutenPlan.scala
+++ b/gluten-core/src/main/scala/org/apache/gluten/extension/GlutenPlan.scala
@@ -72,7 +72,7 @@ trait GlutenPlan extends SparkPlan with LogLevelUtil {
     } catch {
       case e @ (_: GlutenNotSupportException | _: UnsupportedOperationException) =>
         if (!e.isInstanceOf[GlutenNotSupportException]) {
-          logDebug(s"This exception may need to be fixed: ${e.getMessage}")
+          logDebug(s"Just a warning. This exception perhaps needs to be fixed.", e)
         }
         // FIXME: Use a validation-specific method to catch validation failures
         TestStats.addFallBackClassName(this.getClass.toString)
diff --git a/gluten-core/src/main/scala/org/apache/gluten/extension/columnar/TransformHintRule.scala b/gluten-core/src/main/scala/org/apache/gluten/extension/columnar/TransformHintRule.scala
index df1c421b4..ea934425f 100644
--- a/gluten-core/src/main/scala/org/apache/gluten/extension/columnar/TransformHintRule.scala
+++ b/gluten-core/src/main/scala/org/apache/gluten/extension/columnar/TransformHintRule.scala
@@ -536,7 +536,7 @@ case class AddTransformHintRule() extends Rule[SparkPlan] {
           s"${e.getMessage}, original Spark plan is " +
             s"${plan.getClass}(${plan.children.toList.map(_.getClass)})")
         if (!e.isInstanceOf[GlutenNotSupportException]) {
-          logDebug("This exception may need to be fixed: " + e.getMessage)
+          logDebug("Just a warning. This exception perhaps needs to be fixed.", e)
         }
     }
   }
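
Both Scala hunks above tighten the same pattern: GlutenNotSupportException is
the expected validation-failure signal, while any other
UnsupportedOperationException is a potential bug, now logged with its stack
trace rather than only e.getMessage (the ExpressionBuilder.java and Iceberg
hunks switch their throws to GlutenNotSupportException for the same reason).
A condensed, self-contained Scala sketch of the pattern; the exception class
is stubbed here, the real one lives in org.apache.gluten.exception:

    import org.slf4j.LoggerFactory

    object ValidationSketch {
      private val log = LoggerFactory.getLogger(getClass)

      // Stand-in for org.apache.gluten.exception.GlutenNotSupportException.
      class GlutenNotSupportException(msg: String) extends RuntimeException(msg)

      // Expected fallback signals return false quietly; unexpected exception
      // types are logged with the full stack trace so they can be fixed.
      def validateSafely(body: => Unit): Boolean =
        try { body; true }
        catch {
          case e @ (_: GlutenNotSupportException | _: UnsupportedOperationException) =>
            if (!e.isInstanceOf[GlutenNotSupportException]) {
              log.debug("Just a warning. This exception perhaps needs to be fixed.", e)
            }
            false
        }
    }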
diff --git a/gluten-iceberg/src/main/scala/org/apache/iceberg/spark/source/GlutenIcebergSourceUtil.scala b/gluten-iceberg/src/main/scala/org/apache/iceberg/spark/source/GlutenIcebergSourceUtil.scala
index 6c0585149..2b4f54aef 100644
--- a/gluten-iceberg/src/main/scala/org/apache/iceberg/spark/source/GlutenIcebergSourceUtil.scala
+++ b/gluten-iceberg/src/main/scala/org/apache/iceberg/spark/source/GlutenIcebergSourceUtil.scala
@@ -16,6 +16,7 @@
  */
 package org.apache.iceberg.spark.source
 
+import org.apache.gluten.exception.GlutenNotSupportException
 import org.apache.gluten.substrait.rel.{IcebergLocalFilesBuilder, SplitInfo}
 import org.apache.gluten.substrait.rel.LocalFilesNode.ReadFileFormat
 
@@ -88,9 +89,9 @@ object GlutenIcebergSourceUtil {
             case _ =>
           }
       }
-      throw new UnsupportedOperationException("Iceberg Only support parquet and orc file format.")
+      throw new GlutenNotSupportException("Iceberg Only support parquet and orc file format.")
     case _ =>
-      throw new UnsupportedOperationException("Only support iceberg SparkBatchQueryScan.")
+      throw new GlutenNotSupportException("Only support iceberg SparkBatchQueryScan.")
   }
 
   def getPartitionSchema(sparkScan: Scan): StructType = sparkScan match {
@@ -156,6 +157,6 @@ object GlutenIcebergSourceUtil {
       case FileFormat.PARQUET => ReadFileFormat.ParquetReadFormat
       case FileFormat.ORC => ReadFileFormat.OrcReadFormat
       case _ =>
-        throw new UnsupportedOperationException("Iceberg Only support parquet and orc file format.")
+        throw new GlutenNotSupportException("Iceberg Only support parquet and orc file format.")
     }
 }
diff --git a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala
index 956cc67d4..8c55b823a 100644
--- a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala
+++ b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala
@@ -35,6 +35,12 @@ trait GlutenSQLTestsBaseTrait extends SharedSparkSession with GlutenTestsBaseTra
       pos: Position): Unit = {
     test(GLUTEN_TEST + testName, testTag: _*)(testFun)
   }
+
+  protected def ignoreGluten(testName: String, testTag: Tag*)(testFun: => Any)(implicit
+      pos: Position): Unit = {
+    super.ignore(GLUTEN_TEST + testName, testTag: _*)(testFun)
+  }
+
   override protected def test(testName: String, testTags: Tag*)(testFun: => Any)(implicit
       pos: Position): Unit = {
     if (shouldRun(testName)) {
diff --git a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsCommonTrait.scala b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsCommonTrait.scala
index a821fbda3..06b9fca67 100644
--- a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsCommonTrait.scala
+++ b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsCommonTrait.scala
@@ -53,6 +53,12 @@ trait GlutenTestsCommonTrait
       pos: Position): Unit = {
     test(GLUTEN_TEST + testName, testTag: _*)(testFun)
   }
+
+  protected def ignoreGluten(testName: String, testTag: Tag*)(testFun: => Any)(implicit
+      pos: Position): Unit = {
+    super.ignore(GLUTEN_TEST + testName, testTag: _*)(testFun)
+  }
+
   override protected def test(testName: String, testTags: Tag*)(testFun: => Any)(implicit
       pos: Position): Unit = {
     if (shouldRun(testName)) {
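
The two traits above add the same helper: ignoreGluten mirrors testGluten,
prefixing the test name with the Gluten marker and delegating to ScalaTest's
ignore so the case is reported as ignored. A minimal self-contained sketch of
the pattern, assuming only ScalaTest (the "Gluten - " prefix value here is an
assumption based on GlutenTestConstants.GLUTEN_TEST):

    import org.scalactic.source.Position
    import org.scalatest.Tag
    import org.scalatest.funsuite.AnyFunSuite

    trait IgnoreGlutenSketch extends AnyFunSuite {
      private val GLUTEN_TEST = "Gluten - " // assumed prefix value

      // Prefix-and-delegate: keeps the reported name consistent with
      // testGluten(...) while ScalaTest marks the case as ignored.
      protected def ignoreGluten(testName: String, testTag: Tag*)(testFun: => Any)(implicit
          pos: Position): Unit = {
        ignore(GLUTEN_TEST + testName, testTag: _*)(testFun)
      }
    }

A suite mixing this in can then write ignoreGluten("some case") { ... } instead
of ignore(GLUTEN_TEST + "some case") { ... }, which is exactly the mechanical
change the remaining hunks in this commit make.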
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
index df43b0b58..1e34d2c81 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
@@ -21,7 +21,6 @@ import org.apache.gluten.execution.{BatchScanExecTransformer, FileSourceScanExec
 import org.apache.gluten.utils.BackendTestUtils
 
 import org.apache.spark.SparkConf
-import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
 import org.apache.spark.sql.catalyst.expressions.{DynamicPruningExpression, Expression}
 import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode.{CODEGEN_ONLY, NO_CODEGEN}
 import org.apache.spark.sql.catalyst.plans.ExistenceJoin
@@ -57,7 +56,7 @@ abstract class GlutenDynamicPartitionPruningSuiteBase
 
   // === Following cases override super class's cases ===
 
-  ignore(GLUTEN_TEST + "DPP should not be rewritten as an existential join") {
+  ignoreGluten("DPP should not be rewritten as an existential join") {
     // ignored: BroadcastHashJoinExec is from Vanilla Spark
     withSQLConf(
       SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/adaptive/velox/VeloxAdaptiveQueryExecSuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/adaptive/velox/VeloxAdaptiveQueryExecSuite.scala
index b157169f6..f8b6092a4 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/adaptive/velox/VeloxAdaptiveQueryExecSuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/adaptive/velox/VeloxAdaptiveQueryExecSuite.scala
@@ -34,6 +34,8 @@ import org.apache.spark.sql.internal.SQLConf
 import org.apache.spark.sql.test.SQLTestData.TestData
 import org.apache.spark.sql.types.{IntegerType, StructType}
 
+import org.apache.log4j.Level
+
 class VeloxAdaptiveQueryExecSuite extends AdaptiveQueryExecSuite with GlutenSQLTestsTrait {
   import testImplicits._
 
@@ -814,30 +816,30 @@ class VeloxAdaptiveQueryExecSuite extends AdaptiveQueryExecSuite with GlutenSQLT
     }
   }
 
-//  testGluten("Logging plan changes for AQE") {
-//    val testAppender = new LogAppender("plan changes")
-//    withLogAppender(testAppender) {
-//      withSQLConf(
-//        // this test default level is WARN, so we should check warn level
-//        SQLConf.PLAN_CHANGE_LOG_LEVEL.key -> "WARN",
-//        SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
-//        SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80"
-//      ) {
-//        sql(
-//          "SELECT * FROM testData JOIN testData2 ON key = a " +
-//            "WHERE value = (SELECT max(a) FROM testData3)").collect()
-//      }
-//      Seq(
-//        "=== Result of Batch AQE Preparations ===",
-//        "=== Result of Batch AQE Post Stage Creation ===",
-//        "=== Result of Batch AQE Replanning ===",
-//        "=== Result of Batch AQE Query Stage Optimization ==="
-//      ).foreach {
-//        expectedMsg =>
-//          assert(testAppender.loggingEvents.exists(_.getRenderedMessage.contains(expectedMsg)))
-//      }
-//    }
-//  }
+  ignoreGluten("Logging plan changes for AQE") {
+    val testAppender = new LogAppender("plan changes")
+    withLogAppender(testAppender) {
+      withSQLConf(
+        // this test default level is WARN, so we should check warn level
+        SQLConf.PLAN_CHANGE_LOG_LEVEL.key -> "WARN",
+        SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
+        SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80"
+      ) {
+        sql(
+          "SELECT * FROM testData JOIN testData2 ON key = a " +
+            "WHERE value = (SELECT max(a) FROM testData3)").collect()
+      }
+      Seq(
+        "=== Result of Batch AQE Preparations ===",
+        "=== Result of Batch AQE Post Stage Creation ===",
+        "=== Result of Batch AQE Replanning ===",
+        "=== Result of Batch AQE Query Stage Optimization ==="
+      ).foreach {
+        expectedMsg =>
+          assert(testAppender.loggingEvents.exists(_.getRenderedMessage.contains(expectedMsg)))
+      }
+    }
+  }
 
   testGluten("SPARK-33551: Do not use AQE shuffle read for repartition") {
     def hasRepartitionShuffle(plan: SparkPlan): Boolean = {
@@ -1450,51 +1452,51 @@ class VeloxAdaptiveQueryExecSuite extends AdaptiveQueryExecSuite with GlutenSQLT
     }
   }
 
-//  testGluten("test log level") {
-//    def verifyLog(expectedLevel: Level): Unit = {
-//      val logAppender = new LogAppender("adaptive execution")
-//      logAppender.setThreshold(expectedLevel)
-//      withLogAppender(
-//        logAppender,
-//        loggerNames = Seq(AdaptiveSparkPlanExec.getClass.getName.dropRight(1)),
-//        level = Some(Level.TRACE)) {
-//        withSQLConf(
-//          SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
-//          SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "300") {
-//          sql("SELECT * FROM testData join testData2 ON key = a where value = '1'").collect()
-//        }
-//      }
-//      Seq("Plan changed", "Final plan").foreach {
-//        msg =>
-//          assert(logAppender.loggingEvents.exists {
-//            event => event.getRenderedMessage.contains(msg) && event.getLevel == expectedLevel
-//          })
-//      }
-//    }
-//
-//    // Verify default log level
-//    verifyLog(Level.DEBUG)
-//
-//    // Verify custom log level
-//    val levels = Seq(
-//      "TRACE" -> Level.TRACE,
-//      "trace" -> Level.TRACE,
-//      "DEBUG" -> Level.DEBUG,
-//      "debug" -> Level.DEBUG,
-//      "INFO" -> Level.INFO,
-//      "info" -> Level.INFO,
-//      "WARN" -> Level.WARN,
-//      "warn" -> Level.WARN,
-//      "ERROR" -> Level.ERROR,
-//      "error" -> Level.ERROR,
-//      "deBUG" -> Level.DEBUG
-//    )
-//
-//    levels.foreach {
-//      level =>
-//        withSQLConf(SQLConf.ADAPTIVE_EXECUTION_LOG_LEVEL.key -> level._1) {
-//          verifyLog(level._2)
-//        }
-//    }
-//  }
+  ignoreGluten("test log level") {
+    def verifyLog(expectedLevel: Level): Unit = {
+      val logAppender = new LogAppender("adaptive execution")
+      logAppender.setThreshold(expectedLevel)
+      withLogAppender(
+        logAppender,
+        loggerNames = Seq(AdaptiveSparkPlanExec.getClass.getName.dropRight(1)),
+        level = Some(Level.TRACE)) {
+        withSQLConf(
+          SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
+          SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "300") {
+          sql("SELECT * FROM testData join testData2 ON key = a where value = '1'").collect()
+        }
+      }
+      Seq("Plan changed", "Final plan").foreach {
+        msg =>
+          assert(logAppender.loggingEvents.exists {
+            event => event.getRenderedMessage.contains(msg) && event.getLevel == expectedLevel
+          })
+      }
+    }
+
+    // Verify default log level
+    verifyLog(Level.DEBUG)
+
+    // Verify custom log level
+    val levels = Seq(
+      "TRACE" -> Level.TRACE,
+      "trace" -> Level.TRACE,
+      "DEBUG" -> Level.DEBUG,
+      "debug" -> Level.DEBUG,
+      "INFO" -> Level.INFO,
+      "info" -> Level.INFO,
+      "WARN" -> Level.WARN,
+      "warn" -> Level.WARN,
+      "ERROR" -> Level.ERROR,
+      "error" -> Level.ERROR,
+      "deBUG" -> Level.DEBUG
+    )
+
+    levels.foreach {
+      level =>
+        withSQLConf(SQLConf.ADAPTIVE_EXECUTION_LOG_LEVEL.key -> level._1) {
+          verifyLog(level._2)
+        }
+    }
+  }
 }
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
index 06dab4652..cba70c21f 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
@@ -18,7 +18,6 @@ package org.apache.spark.sql
 
 import org.apache.gluten.execution.HashAggregateExecBaseTransformer
 
-import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
 import org.apache.spark.sql.execution.WholeStageCodegenExec
 import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, SortAggregateExec}
 import org.apache.spark.sql.expressions.Aggregator
@@ -130,7 +129,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
 //      Row(new java.math.BigDecimal(2), new java.math.BigDecimal(6)) :: Nil)
   }
 
-  ignore(GLUTEN_TEST + "SPARK-32038: NormalizeFloatingNumbers should work on distinct aggregate") {
+  ignoreGluten("SPARK-32038: NormalizeFloatingNumbers should work on distinct aggregate") {
     withTempView("view") {
       Seq(
         ("mithunr", Float.NaN),
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
index 766cfc012..a08d3eff3 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
@@ -21,7 +21,6 @@ import org.apache.gluten.execution.{BatchScanExecTransformer, FileSourceScanExec
 import org.apache.gluten.utils.BackendTestUtils
 
 import org.apache.spark.SparkConf
-import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
 import org.apache.spark.sql.catalyst.expressions.{DynamicPruningExpression, Expression}
 import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode.{CODEGEN_ONLY, NO_CODEGEN}
 import org.apache.spark.sql.catalyst.plans.ExistenceJoin
@@ -57,7 +56,7 @@ abstract class GlutenDynamicPartitionPruningSuiteBase
 
   // === Following cases override super class's cases ===
 
-  ignore(GLUTEN_TEST + "DPP should not be rewritten as an existential join") {
+  ignoreGluten("DPP should not be rewritten as an existential join") {
     // ignored: BroadcastHashJoinExec is from Vanilla Spark
     withSQLConf(
       SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
index 06dab4652..cba70c21f 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
@@ -18,7 +18,6 @@ package org.apache.spark.sql
 
 import org.apache.gluten.execution.HashAggregateExecBaseTransformer
 
-import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
 import org.apache.spark.sql.execution.WholeStageCodegenExec
 import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, SortAggregateExec}
 import org.apache.spark.sql.expressions.Aggregator
@@ -130,7 +129,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
 //      Row(new java.math.BigDecimal(2), new java.math.BigDecimal(6)) :: Nil)
   }
 
-  ignore(GLUTEN_TEST + "SPARK-32038: NormalizeFloatingNumbers should work on distinct aggregate") {
+  ignoreGluten("SPARK-32038: NormalizeFloatingNumbers should work on distinct aggregate") {
     withTempView("view") {
       Seq(
         ("mithunr", Float.NaN),
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
index d51b83ea2..559aaada4 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
@@ -20,7 +20,6 @@ import org.apache.gluten.GlutenConfig
 import org.apache.gluten.execution.{BatchScanExecTransformer, FileSourceScanExecTransformer, FilterExecTransformerBase}
 
 import org.apache.spark.SparkConf
-import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
 import org.apache.spark.sql.catalyst.expressions.{DynamicPruningExpression, Expression}
 import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode.{CODEGEN_ONLY, NO_CODEGEN}
 import org.apache.spark.sql.catalyst.plans.ExistenceJoin
@@ -58,7 +57,7 @@ abstract class GlutenDynamicPartitionPruningSuiteBase
 
   // === Following cases override super class's cases ===
 
-  ignore(GLUTEN_TEST + "DPP should not be rewritten as an existential join") {
+  ignoreGluten("DPP should not be rewritten as an existential join") {
     // ignored: BroadcastHashJoinExec is from Vanilla Spark
     withSQLConf(
       SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetFilterSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetFilterSuite.scala
index 5fa54f305..471d88f36 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetFilterSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetFilterSuite.scala
@@ -259,7 +259,7 @@ abstract class GltuenParquetFilterSuite extends ParquetFilterSuite with GlutenSQ
   }
 
   // Velox doesn't support ParquetOutputFormat.PAGE_SIZE and ParquetOutputFormat.BLOCK_SIZE.
-  ignore(GlutenTestConstants.GLUTEN_TEST + "Support Parquet column index") {
+  ignoreGluten("Support Parquet column index") {
     // block 1:
     //                      null count  min                                       max
     // page-0                         0  0                                          99
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
index 3a24fdd68..ca4b3740a 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
@@ -23,7 +23,6 @@ import org.apache.spark.SparkConf
 import org.apache.spark.executor.OutputMetrics
 import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
 import org.apache.spark.sql._
-import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.execution.{CommandResultExec, QueryExecution, VeloxColumnarWriteFilesExec}
 import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
@@ -487,7 +486,7 @@ class GlutenInsertSuite
     }
   }
 
-  ignore(GLUTEN_TEST + "SPARK-39557 INSERT INTO statements with tables with map defaults") {
+  ignoreGluten("SPARK-39557 INSERT INTO statements with tables with map defaults") {
     withSQLConf("spark.gluten.sql.complexType.scan.fallback.enabled" -> "false") {
 
       import testImplicits._
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
index 06dab4652..cba70c21f 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
@@ -18,7 +18,6 @@ package org.apache.spark.sql
 
 import org.apache.gluten.execution.HashAggregateExecBaseTransformer
 
-import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
 import org.apache.spark.sql.execution.WholeStageCodegenExec
 import org.apache.spark.sql.execution.aggregate.{HashAggregateExec, SortAggregateExec}
 import org.apache.spark.sql.expressions.Aggregator
@@ -130,7 +129,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
 //      Row(new java.math.BigDecimal(2), new java.math.BigDecimal(6)) :: Nil)
   }
 
-  ignore(GLUTEN_TEST + "SPARK-32038: NormalizeFloatingNumbers should work on distinct aggregate") {
+  ignoreGluten("SPARK-32038: NormalizeFloatingNumbers should work on distinct aggregate") {
     withTempView("view") {
       Seq(
         ("mithunr", Float.NaN),
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
index d51b83ea2..559aaada4 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDynamicPartitionPruningSuite.scala
@@ -20,7 +20,6 @@ import org.apache.gluten.GlutenConfig
 import org.apache.gluten.execution.{BatchScanExecTransformer, FileSourceScanExecTransformer, FilterExecTransformerBase}
 
 import org.apache.spark.SparkConf
-import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
 import org.apache.spark.sql.catalyst.expressions.{DynamicPruningExpression, Expression}
 import org.apache.spark.sql.catalyst.expressions.CodegenObjectFactoryMode.{CODEGEN_ONLY, NO_CODEGEN}
 import org.apache.spark.sql.catalyst.plans.ExistenceJoin
@@ -58,7 +57,7 @@ abstract class GlutenDynamicPartitionPruningSuiteBase
 
   // === Following cases override super class's cases ===
 
-  ignore(GLUTEN_TEST + "DPP should not be rewritten as an existential join") {
+  ignoreGluten("DPP should not be rewritten as an existential join") {
     // ignored: BroadcastHashJoinExec is from Vanilla Spark
     withSQLConf(
       SQLConf.DYNAMIC_PARTITION_PRUNING_ENABLED.key -> "true",
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetFilterSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetFilterSuite.scala
index 1da064399..bb4a78a82 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetFilterSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetFilterSuite.scala
@@ -259,7 +259,7 @@ abstract class GltuenParquetFilterSuite extends ParquetFilterSuite with GlutenSQ
   }
 
   // Velox doesn't support ParquetOutputFormat.PAGE_SIZE and ParquetOutputFormat.BLOCK_SIZE.
-  ignore(GlutenTestConstants.GLUTEN_TEST + "Support Parquet column index") {
+  ignoreGluten("Support Parquet column index") {
     // block 1:
     //                      null count  min                                       max
     // page-0                         0  0                                          99
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
index 573decfc9..2814c2e8c 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/sources/GlutenInsertSuite.scala
@@ -23,7 +23,6 @@ import org.apache.spark.SparkConf
 import org.apache.spark.executor.OutputMetrics
 import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
 import org.apache.spark.sql._
-import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
 import org.apache.spark.sql.catalyst.TableIdentifier
 import org.apache.spark.sql.execution.{CommandResultExec, QueryExecution, VeloxColumnarWriteFilesExec}
 import org.apache.spark.sql.execution.adaptive.AdaptiveSparkPlanHelper
@@ -487,7 +486,7 @@ class GlutenInsertSuite
     }
   }
 
-  ignore(GLUTEN_TEST + "SPARK-39557 INSERT INTO statements with tables with map defaults") {
+  ignoreGluten("SPARK-39557 INSERT INTO statements with tables with map defaults") {
     withSQLConf("spark.gluten.sql.complexType.scan.fallback.enabled" -> "false") {
 
       import testImplicits._


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
