This is an automated email from the ASF dual-hosted git repository.
yangzy pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git
The following commit(s) were added to refs/heads/main by this push:
new b2eed1f191 [MINOR] Import config keys instead of hard-coding string values for gluten-ut (#9834)
b2eed1f191 is described below
commit b2eed1f191fa229c034856634bbbbb6a52c5fff9
Author: Yongkyun Lee <[email protected]>
AuthorDate: Fri Jun 6 01:52:59 2025 -0700
[MINOR] Import config keys instead of hard-coding string values for gluten-ut (#9834)
---
.../org/apache/gluten/backendsapi/clickhouse/CHBackend.scala | 2 +-
.../scala/org/apache/spark/sql/DummyFilterColmnarHelper.scala | 3 ++-
.../scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala | 5 +++--
.../src/test/scala/org/apache/spark/sql/GlutenTestsTrait.scala | 2 +-
.../org/apache/spark/sql/GlutenColumnExpressionSuite.scala | 4 +++-
.../org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala | 5 +++--
.../org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala | 4 +++-
.../scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala | 3 ++-
.../apache/spark/sql/execution/FallbackStrategiesSuite.scala | 10 +++++-----
.../spark/sql/execution/benchmarks/ParquetReadBenchmark.scala | 2 +-
.../spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala | 3 ++-
.../spark/sql/extension/GlutenCustomerExtensionSuite.scala | 6 +++---
.../spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala | 6 ++++--
.../apache/spark/sql/statistics/SparkFunctionStatistics.scala | 3 ++-
.../org/apache/spark/sql/GlutenColumnExpressionSuite.scala | 4 +++-
.../org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala | 5 +++--
.../org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala | 4 +++-
.../test/scala/org/apache/spark/sql/GlutenImplicitsTest.scala | 4 +++-
.../scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala | 3 ++-
.../sql/connector/GlutenKeyGroupedPartitioningSuite.scala | 3 ++-
.../apache/spark/sql/execution/FallbackStrategiesSuite.scala | 10 +++++-----
.../spark/sql/execution/benchmarks/ParquetReadBenchmark.scala | 2 +-
.../spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala | 3 ++-
.../spark/sql/extension/GlutenCustomerExtensionSuite.scala | 6 +++---
.../org/apache/spark/sql/gluten/GlutenFallbackSuite.scala | 2 +-
.../spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala | 6 ++++--
.../spark/sql/hive/execution/GlutenHiveSQLQuerySuite.scala | 3 ++-
.../apache/spark/sql/statistics/SparkFunctionStatistics.scala | 3 ++-
.../org/apache/spark/sql/GlutenColumnExpressionSuite.scala | 4 +++-
.../org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala | 5 +++--
.../org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala | 3 ++-
.../scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala | 3 ++-
.../sql/connector/GlutenKeyGroupedPartitioningSuite.scala | 3 ++-
.../apache/spark/sql/execution/FallbackStrategiesSuite.scala | 10 +++++-----
.../spark/sql/execution/benchmarks/ParquetReadBenchmark.scala | 2 +-
.../datasources/parquet/GlutenParquetRowIndexSuite.scala | 3 ++-
.../spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala | 3 ++-
.../spark/sql/extension/GlutenCustomerExtensionSuite.scala | 6 +++---
.../org/apache/spark/sql/gluten/GlutenFallbackSuite.scala | 2 +-
.../spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala | 6 ++++--
.../apache/spark/sql/statistics/SparkFunctionStatistics.scala | 3 ++-
.../org/apache/spark/sql/GlutenColumnExpressionSuite.scala | 4 +++-
.../org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala | 5 +++--
.../org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala | 3 ++-
.../scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala | 3 ++-
.../sql/connector/GlutenKeyGroupedPartitioningSuite.scala | 3 ++-
.../apache/spark/sql/execution/FallbackStrategiesSuite.scala | 10 +++++-----
.../spark/sql/execution/benchmarks/ParquetReadBenchmark.scala | 2 +-
.../datasources/parquet/GlutenParquetRowIndexSuite.scala | 3 ++-
.../spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala | 3 ++-
.../spark/sql/extension/GlutenCustomerExtensionSuite.scala | 6 +++---
.../org/apache/spark/sql/gluten/GlutenFallbackSuite.scala | 2 +-
.../spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala | 6 ++++--
.../apache/spark/sql/statistics/SparkFunctionStatistics.scala | 3 ++-
.../execution/MergeTwoPhasesHashBaseAggregateSuite.scala | 5 +++--
.../gluten/expressions/GlutenExpressionMappingSuite.scala | 4 ++--
.../gluten/extension/GlutenExtensionRewriteRuleSuite.scala | 3 ++-
.../src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala | 4 ++--
.../apache/spark/sql/GlutenExpressionDataTypesValidation.scala | 5 +++--
.../spark/sql/datasources/GlutenNoopWriterRuleSuite.scala | 3 ++-
.../apache/spark/sql/execution/GlutenSQLRangeExecSuite.scala | 5 +++--
61 files changed, 152 insertions(+), 99 deletions(-)
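Every hunk below applies the same mechanical substitution: a Gluten config name that was hard-coded as a string literal is replaced with the `.key` of the matching ConfigEntry in org.apache.gluten.config.GlutenConfig. A minimal sketch of the two styles (COLUMNAR_FILESCAN_ENABLED is one of the entries touched below; the ConfigEntry/.key shape follows Spark's ConfigBuilder convention):

    import org.apache.gluten.config.GlutenConfig

    // Before: the key is re-typed as a raw string in every suite, so a typo or
    // a rename silently stops matching the real config.
    spark.conf.set("spark.gluten.sql.columnar.filescan", "false")

    // After: the key comes from the single ConfigEntry definition, so a rename
    // breaks the build instead of the test.
    spark.conf.set(GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")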
diff --git a/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHBackend.scala b/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHBackend.scala
index 2c7ca33438..df89fb3daf 100644
--- a/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHBackend.scala
+++ b/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHBackend.scala
@@ -79,7 +79,7 @@ object CHBackend {
object CHBackendSettings extends BackendSettingsApi with Logging {
override def primaryBatchType: Convention.BatchType = CHBatchType
- private val GLUTEN_CLICKHOUSE_SEP_SCAN_RDD = "spark.gluten.sql.columnar.separate.scan.rdd.for.ch"
+ val GLUTEN_CLICKHOUSE_SEP_SCAN_RDD = "spark.gluten.sql.columnar.separate.scan.rdd.for.ch"
private val GLUTEN_CLICKHOUSE_SEP_SCAN_RDD_DEFAULT = "false"
// experimental: when the files count per partition exceeds this threshold,
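This is the only non-test hunk: GLUTEN_CLICKHOUSE_SEP_SCAN_RDD merely loses its private modifier so code outside CHBackendSettings can reference the constant instead of re-spelling the string; the value is unchanged. A hypothetical caller on a SparkConf would then look like:

    conf.set(CHBackendSettings.GLUTEN_CLICKHOUSE_SEP_SCAN_RDD, "false")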
diff --git a/gluten-ut/common/src/test/scala/org/apache/spark/sql/DummyFilterColmnarHelper.scala b/gluten-ut/common/src/test/scala/org/apache/spark/sql/DummyFilterColmnarHelper.scala
index 41c652fc60..67b6531df0 100644
--- a/gluten-ut/common/src/test/scala/org/apache/spark/sql/DummyFilterColmnarHelper.scala
+++ b/gluten-ut/common/src/test/scala/org/apache/spark/sql/DummyFilterColmnarHelper.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.utils.BackendTestUtils
import org.apache.spark.rdd.RDD
@@ -86,7 +87,7 @@ object DummyFilterColmnarHelper {
.config("spark.plugins", "org.apache.gluten.GlutenPlugin")
.config("spark.shuffle.manager",
"org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.config("spark.io.compression.codec", "LZ4")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
} else {
SparkSession
.builder()
diff --git a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala
index b6ece98e4a..51b6adce96 100644
--- a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala
+++ b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.utils.BackendTestUtils
import org.apache.spark.SparkConf
@@ -92,7 +93,7 @@ object GlutenSQLTestsBaseTrait {
.set("spark.shuffle.manager",
"org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.sql.warehouse.dir", warehouse)
.set("spark.ui.enabled", "false")
- .set("spark.gluten.ui.enabled", "false")
+ .set(GlutenConfig.GLUTEN_UI_ENABLED.key, "false")
// Avoid static evaluation by spark catalyst. But there are some UT issues
// coming from spark, e.g., expecting SparkException is thrown, but the wrapped
// exception is thrown.
@@ -103,7 +104,7 @@ object GlutenSQLTestsBaseTrait {
conf
.set("spark.io.compression.codec", "LZ4")
.set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.files.openCostInBytes", "134217728")
.set("spark.unsafe.exceptionOnMemoryLeak", "true")
} else {
diff --git a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsTrait.scala b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsTrait.scala
index 41b0f1be73..22c2280cb2 100644
--- a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsTrait.scala
+++ b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsTrait.scala
@@ -111,7 +111,7 @@ trait GlutenTestsTrait extends GlutenTestsCommonTrait {
sparkBuilder
.config("spark.io.compression.codec", "LZ4")
.config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.sql.files.openCostInBytes", "134217728")
.config("spark.unsafe.exceptionOnMemoryLeak", "true")
.config(GlutenConfig.UT_STATISTIC.key, "true")
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
index dbc70bf745..f4f85825c8 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkException
import org.apache.spark.sql.execution.ProjectExec
import org.apache.spark.sql.functions.{assert_true, expr, input_file_name, lit, raise_error}
@@ -73,7 +75,7 @@ class GlutenColumnExpressionSuite extends ColumnExpressionSuite with GlutenSQLTe
testGluten(
"input_file_name, input_file_block_start and input_file_block_length " +
"should fall back if scan falls back") {
- withSQLConf(("spark.gluten.sql.columnar.filescan", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")) {
withTempPath {
dir =>
val data = sparkContext.parallelize(0 to 10).toDF("id")
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
index 415b9f8908..027951c471 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.HashAggregateExecBaseTransformer
import org.apache.spark.sql.execution.WholeStageCodegenExec
@@ -187,7 +188,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
// This test is applicable to velox backend. For CH backend, the replacement is disabled.
testGluten("use gluten hash agg to replace vanilla spark sort agg") {
- withSQLConf(("spark.gluten.sql.columnar.force.hashagg", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FORCE_HASHAGG_ENABLED.key, "false")) {
Seq("A", "B", "C", "D").toDF("col1").createOrReplaceTempView("t1")
// SortAggregateExec is expected to be used for string type input.
val df = spark.sql("select max(col1) from t1")
@@ -195,7 +196,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
assert(find(df.queryExecution.executedPlan)(_.isInstanceOf[SortAggregateExec]).isDefined)
}
- withSQLConf(("spark.gluten.sql.columnar.force.hashagg", "true")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FORCE_HASHAGG_ENABLED.key, "true")) {
Seq("A", "B", "C", "D").toDF("col1").createOrReplaceTempView("t1")
val df = spark.sql("select max(col1) from t1")
checkAnswer(df, Row("D") :: Nil)
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
index 10a0bb25e6..b22613f471 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
@@ -33,7 +35,7 @@ class GlutenFileBasedDataSourceSuite extends FileBasedDataSourceSuite with Glute
override def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.sql.columnar.forceShuffledHashJoin", "false")
+ .set(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key, "false")
.set(SQLConf.SHUFFLE_PARTITIONS.key, "5")
}
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
index de6c9e7ff3..2b4fce3a57 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.exception.GlutenException
import org.apache.gluten.utils.{BackendTestSettings, BackendTestUtils}
@@ -193,7 +194,7 @@ class GlutenSQLQueryTestSuite
conf
.set("spark.io.compression.codec", "LZ4")
.set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.files.openCostInBytes", "134217728")
.set("spark.unsafe.exceptionOnMemoryLeak", "true")
} else {
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
index bbbc913bd8..0eacdcedd2 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
@@ -37,7 +37,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
import FallbackStrategiesSuite._
testGluten("Fall back the whole query if one unsupported") {
- withSQLConf(("spark.gluten.sql.columnar.query.fallback.threshold", "1")) {
+ withSQLConf((GlutenConfig.COLUMNAR_QUERY_FALLBACK_THRESHOLD.key, "1")) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
spark,
@@ -54,7 +54,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
}
testGluten("Fall back the whole plan if meeting the configured threshold") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"1")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"1")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -73,7 +73,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
}
testGluten("Don't fall back the whole plan if NOT meeting the configured
threshold") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"4")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"4")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -94,7 +94,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
testGluten(
"Fall back the whole plan if meeting the configured threshold (leaf node
is" +
" transformable)") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"2")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"2")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -115,7 +115,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
testGluten(
"Don't Fall back the whole plan if NOT meeting the configured threshold ("
+
"leaf node is transformable)") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"3")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"3")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
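All of the threshold tests above set configs through Spark's withSQLConf test helper, which takes (key, value) pairs, applies them for the duration of the block, and restores the previous values afterwards. With the imported entry the pair is built from the entry's key; roughly (a sketch reusing one of the entries above, with a made-up query):

    import org.apache.gluten.config.GlutenConfig

    // Fall back the whole stage as soon as one node cannot be offloaded, then
    // restore the previous threshold when the block exits.
    withSQLConf(GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key -> "1") {
      val df = spark.range(10).selectExpr("id % 2 as k").groupBy("k").count()
      df.collect()
    }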
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
index 93aa0f2e7d..1fd2cfa26a 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
@@ -78,7 +78,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
if (BackendTestUtils.isCHBackendLoaded()) {
conf
.set("spark.io.compression.codec", "LZ4")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
.set("spark.gluten.sql.columnar.separate.scan.rdd.for.ch", "false")
.set(
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
index 9f82301258..ce380aebfa 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution.joins
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.{BroadcastHashJoinExecTransformerBase, BroadcastNestedLoopJoinExecTransformer, ColumnarToRowExecBase, WholeStageTransformer}
import org.apache.gluten.utils.BackendTestUtils
@@ -86,7 +87,7 @@ class GlutenBroadcastJoinSuite extends BroadcastJoinSuite with GlutenTestsCommon
sparkBuilder
.config("spark.io.compression.codec", "LZ4")
.config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.sql.files.openCostInBytes", "134217728")
.config("spark.unsafe.exceptionOnMemoryLeak", "true")
.getOrCreate()
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
index a8eb2361ab..1d25f2a40e 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
@@ -27,10 +27,10 @@ class GlutenCustomerExtensionSuite extends GlutenSQLTestsTrait {
super.sparkConf
.set("spark.sql.adaptive.enabled", "false")
.set(
- "spark.gluten.sql.columnar.extended.columnar.pre.rules",
+ GlutenConfig.EXTENDED_COLUMNAR_TRANSFORM_RULES.key,
"org.apache.spark.sql" +
".extension.CustomerColumnarPreRules")
- .set("spark.gluten.sql.columnar.extended.columnar.post.rules", "")
+ .set(GlutenConfig.EXTENDED_COLUMNAR_POST_RULES.key, "")
}
testGluten("test customer column rules") {
@@ -39,7 +39,7 @@ class GlutenCustomerExtensionSuite extends GlutenSQLTestsTrait {
sql("insert into my_parquet values (1)")
sql("insert into my_parquet values (2)")
}
- withSQLConf(("spark.gluten.sql.columnar.filescan", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")) {
val df = sql("select * from my_parquet")
val testFileSourceScanExecTransformer = df.queryExecution.executedPlan.collect {
case f: TestFileSourceScanExecTransformer => f
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
index 859e6df484..264a7e7836 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql.hive.execution
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.TableIdentifier
@@ -25,8 +27,8 @@ class GlutenHiveSQLQueryCHSuite extends GlutenHiveSQLQuerySuiteBase {
override def sparkConf: SparkConf = {
defaultSparkConf
.set("spark.plugins", "org.apache.gluten.GlutenPlugin")
- .set("spark.gluten.sql.enable.native.validation", "false")
- .set("spark.gluten.sql.native.writer.enabled", "true")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
+ .set(GlutenConfig.NATIVE_WRITER_ENABLED.key, "true")
.set("spark.sql.storeAssignmentPolicy", "legacy")
.set("spark.default.parallelism", "1")
.set("spark.memory.offHeap.enabled", "true")
diff --git a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
index 925d60df8e..8cf0cece61 100644
--- a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
+++ b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.statistics
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.GlutenPlan
import org.apache.gluten.utils.BackendTestUtils
@@ -61,7 +62,7 @@ class SparkFunctionStatistics extends QueryTest {
sparkBuilder
.config("spark.io.compression.codec", "LZ4")
.config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.sql.files.openCostInBytes", "134217728")
.config("spark.unsafe.exceptionOnMemoryLeak", "true")
.getOrCreate()
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
index dbc70bf745..f4f85825c8 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkException
import org.apache.spark.sql.execution.ProjectExec
import org.apache.spark.sql.functions.{assert_true, expr, input_file_name, lit, raise_error}
@@ -73,7 +75,7 @@ class GlutenColumnExpressionSuite extends ColumnExpressionSuite with GlutenSQLTe
testGluten(
"input_file_name, input_file_block_start and input_file_block_length " +
"should fall back if scan falls back") {
- withSQLConf(("spark.gluten.sql.columnar.filescan", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")) {
withTempPath {
dir =>
val data = sparkContext.parallelize(0 to 10).toDF("id")
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
index cba70c21f8..2f3777caa1 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.HashAggregateExecBaseTransformer
import org.apache.spark.sql.execution.WholeStageCodegenExec
@@ -187,7 +188,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
// This test is applicable to velox backend. For CH backend, the replacement is disabled.
testGluten("use gluten hash agg to replace vanilla spark sort agg") {
- withSQLConf(("spark.gluten.sql.columnar.force.hashagg", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FORCE_HASHAGG_ENABLED.key, "false")) {
Seq("A", "B", "C", "D").toDF("col1").createOrReplaceTempView("t1")
// SortAggregateExec is expected to be used for string type input.
val df = spark.sql("select max(col1) from t1")
@@ -195,7 +196,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
assert(find(df.queryExecution.executedPlan)(_.isInstanceOf[SortAggregateExec]).isDefined)
}
- withSQLConf(("spark.gluten.sql.columnar.force.hashagg", "true")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FORCE_HASHAGG_ENABLED.key, "true")) {
Seq("A", "B", "C", "D").toDF("col1").createOrReplaceTempView("t1")
val df = spark.sql("select max(col1) from t1")
checkAnswer(df, Row("D") :: Nil)
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
index 44b45b6295..e8264556d5 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.{SparkConf, SparkException}
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
import org.apache.spark.sql.GlutenTestConstants.GLUTEN_TEST
@@ -33,7 +35,7 @@ class GlutenFileBasedDataSourceSuite extends FileBasedDataSourceSuite with Glute
override def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.sql.columnar.forceShuffledHashJoin", "false")
+ .set(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key, "false")
.set(SQLConf.SHUFFLE_PARTITIONS.key, "5")
}
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenImplicitsTest.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenImplicitsTest.scala
index 9e120945be..6de15f434a 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenImplicitsTest.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenImplicitsTest.scala
@@ -16,12 +16,14 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkConf
import org.apache.spark.sql.execution.GlutenImplicits._
import org.apache.spark.sql.internal.SQLConf
class GlutenImplicitsTest extends GlutenSQLTestsBaseTrait {
- sys.props.put("spark.gluten.sql.columnar.tableCache", "true")
+ sys.props.put(GlutenConfig.COLUMNAR_TABLE_CACHE_ENABLED.key, "true")
override protected def beforeAll(): Unit = {
super.beforeAll()
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
index 9c1bf9cbe1..00bae3541d 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.exception.GlutenException
import org.apache.gluten.utils.{BackendTestSettings, BackendTestUtils}
@@ -193,7 +194,7 @@ class GlutenSQLQueryTestSuite
conf
.set("spark.io.compression.codec", "LZ4")
.set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.files.openCostInBytes", "134217728")
.set("spark.unsafe.exceptionOnMemoryLeak", "true")
} else {
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala
index 8bc67f6f8f..9cea1d9dc6 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.connector
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.SortMergeJoinExecTransformerBase
import org.apache.spark.SparkConf
@@ -39,7 +40,7 @@ class GlutenKeyGroupedPartitioningSuite
override def sparkConf: SparkConf = {
// Native SQL configs
super.sparkConf
- .set("spark.gluten.sql.columnar.forceShuffledHashJoin", "false")
+ .set(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key, "false")
.set("spark.sql.adaptive.enabled", "false")
.set("spark.sql.shuffle.partitions", "5")
}
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
index f51f2721f4..4ca6ce7b04 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
@@ -36,7 +36,7 @@ import org.apache.spark.sql.catalyst.rules.Rule
class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
import FallbackStrategiesSuite._
testGluten("Fall back the whole query if one unsupported") {
- withSQLConf(("spark.gluten.sql.columnar.query.fallback.threshold", "1")) {
+ withSQLConf((GlutenConfig.COLUMNAR_QUERY_FALLBACK_THRESHOLD.key, "1")) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
spark,
@@ -53,7 +53,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
}
testGluten("Fall back the whole plan if meeting the configured threshold") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"1")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"1")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -72,7 +72,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
}
testGluten("Don't fall back the whole plan if NOT meeting the configured
threshold") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"4")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"4")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -93,7 +93,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
testGluten(
"Fall back the whole plan if meeting the configured threshold (leaf node
is" +
" transformable)") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"2")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"2")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -114,7 +114,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
testGluten(
"Don't Fall back the whole plan if NOT meeting the configured threshold ("
+
"leaf node is transformable)") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"3")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"3")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
index 8fc9ba8c9c..12cc721154 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
@@ -78,7 +78,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
if (BackendTestUtils.isCHBackendLoaded()) {
conf
.set("spark.io.compression.codec", "LZ4")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
.set("spark.gluten.sql.columnar.separate.scan.rdd.for.ch", "false")
.set(
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
index c8b371f48f..c7aaf9ec5c 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution.joins
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.utils.BackendTestUtils
import org.apache.spark.sql.{GlutenTestsCommonTrait, SparkSession}
@@ -63,7 +64,7 @@ class GlutenBroadcastJoinSuite extends BroadcastJoinSuite with GlutenTestsCommon
sparkBuilder
.config("spark.io.compression.codec", "LZ4")
.config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.sql.files.openCostInBytes", "134217728")
.config("spark.unsafe.exceptionOnMemoryLeak", "true")
.getOrCreate()
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
index a8eb2361ab..1d25f2a40e 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
@@ -27,10 +27,10 @@ class GlutenCustomerExtensionSuite extends GlutenSQLTestsTrait {
super.sparkConf
.set("spark.sql.adaptive.enabled", "false")
.set(
- "spark.gluten.sql.columnar.extended.columnar.pre.rules",
+ GlutenConfig.EXTENDED_COLUMNAR_TRANSFORM_RULES.key,
"org.apache.spark.sql" +
".extension.CustomerColumnarPreRules")
- .set("spark.gluten.sql.columnar.extended.columnar.post.rules", "")
+ .set(GlutenConfig.EXTENDED_COLUMNAR_POST_RULES.key, "")
}
testGluten("test customer column rules") {
@@ -39,7 +39,7 @@ class GlutenCustomerExtensionSuite extends GlutenSQLTestsTrait {
sql("insert into my_parquet values (1)")
sql("insert into my_parquet values (2)")
}
- withSQLConf(("spark.gluten.sql.columnar.filescan", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")) {
val df = sql("select * from my_parquet")
val testFileSourceScanExecTransformer = df.queryExecution.executedPlan.collect {
case f: TestFileSourceScanExecTransformer => f
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala
index e761f5a39f..c8a8ac85cd 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala
@@ -37,7 +37,7 @@ class GlutenFallbackSuite extends GlutenSQLTestsTrait with AdaptiveSparkPlanHelp
override def sparkConf: SparkConf = {
super.sparkConf
.set(GlutenConfig.RAS_ENABLED.key, "false")
- .set("spark.gluten.ui.enabled", "true")
+ .set(GlutenConfig.GLUTEN_UI_ENABLED.key, "true")
// The gluten ui event test suite expects the spark ui to be enabled
.set(UI_ENABLED, true)
}
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
index 859e6df484..264a7e7836 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql.hive.execution
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.TableIdentifier
@@ -25,8 +27,8 @@ class GlutenHiveSQLQueryCHSuite extends GlutenHiveSQLQuerySuiteBase {
override def sparkConf: SparkConf = {
defaultSparkConf
.set("spark.plugins", "org.apache.gluten.GlutenPlugin")
- .set("spark.gluten.sql.enable.native.validation", "false")
- .set("spark.gluten.sql.native.writer.enabled", "true")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
+ .set(GlutenConfig.NATIVE_WRITER_ENABLED.key, "true")
.set("spark.sql.storeAssignmentPolicy", "legacy")
.set("spark.default.parallelism", "1")
.set("spark.memory.offHeap.enabled", "true")
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQuerySuite.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQuerySuite.scala
index 923a1fa9fb..0b520ae321 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQuerySuite.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQuerySuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.hive.execution
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.FileSourceScanExecTransformer
import org.apache.spark.SparkConf
@@ -69,7 +70,7 @@ class GlutenHiveSQLQuerySuite extends GlutenHiveSQLQuerySuiteBase {
withSQLConf(
"spark.sql.hive.convertMetastoreOrc" -> convertMetastoreOrc,
- "spark.gluten.sql.orc.charType.scan.fallback.enabled" ->
charTypeFallbackEnabled
+ GlutenConfig.VELOX_FORCE_ORC_CHAR_TYPE_SCAN_FALLBACK.key ->
charTypeFallbackEnabled
) {
val queries = Seq("select id from test_orc", "select name, id from
test_orc")
diff --git a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
index c349d9ca91..5723cb1161 100644
--- a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
+++ b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.statistics
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.GlutenPlan
import org.apache.gluten.utils.BackendTestUtils
@@ -61,7 +62,7 @@ class SparkFunctionStatistics extends QueryTest {
sparkBuilder
.config("spark.io.compression.codec", "LZ4")
.config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.sql.files.openCostInBytes", "134217728")
.config("spark.unsafe.exceptionOnMemoryLeak", "true")
.getOrCreate()
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
index dbc70bf745..f4f85825c8 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkException
import org.apache.spark.sql.execution.ProjectExec
import org.apache.spark.sql.functions.{assert_true, expr, input_file_name, lit, raise_error}
@@ -73,7 +75,7 @@ class GlutenColumnExpressionSuite extends ColumnExpressionSuite with GlutenSQLTe
testGluten(
"input_file_name, input_file_block_start and input_file_block_length " +
"should fall back if scan falls back") {
- withSQLConf(("spark.gluten.sql.columnar.filescan", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")) {
withTempPath {
dir =>
val data = sparkContext.parallelize(0 to 10).toDF("id")
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
index de56b88344..f50aa04dd5 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.HashAggregateExecBaseTransformer
import org.apache.spark.sql.execution.WholeStageCodegenExec
@@ -187,7 +188,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
// This test is applicable to velox backend. For CH backend, the replacement is disabled.
testGluten("use gluten hash agg to replace vanilla spark sort agg") {
- withSQLConf(("spark.gluten.sql.columnar.force.hashagg", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FORCE_HASHAGG_ENABLED.key, "false")) {
withTempView("t1") {
Seq("A", "B", "C", "D").toDF("col1").createOrReplaceTempView("t1")
// SortAggregateExec is expected to be used for string type input.
@@ -197,7 +198,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
}
}
- withSQLConf(("spark.gluten.sql.columnar.force.hashagg", "true")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FORCE_HASHAGG_ENABLED.key, "true")) {
withTempView("t1") {
Seq("A", "B", "C", "D").toDF("col1").createOrReplaceTempView("t1")
val df = spark.sql("select max(col1) from t1")
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
index cc2b0b62ca..9ae4a0f063 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.BatchScanExecTransformer
import org.apache.spark.{SparkConf, SparkException}
@@ -39,7 +40,7 @@ class GlutenFileBasedDataSourceSuite extends FileBasedDataSourceSuite with Glute
override def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.sql.columnar.forceShuffledHashJoin", "false")
+ .set(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key, "false")
.set(SQLConf.SHUFFLE_PARTITIONS.key, "5")
}
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
index ce5d50b175..30f2ed447e 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.exception.GlutenException
import org.apache.gluten.utils.{BackendTestSettings, BackendTestUtils}
@@ -197,7 +198,7 @@ class GlutenSQLQueryTestSuite
conf
.set("spark.io.compression.codec", "LZ4")
.set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.files.openCostInBytes", "134217728")
.set("spark.unsafe.exceptionOnMemoryLeak", "true")
} else {
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala
index 78d4be44fb..aa81d8793a 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.connector
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.SortMergeJoinExecTransformer
import org.apache.spark.SparkConf
@@ -38,7 +39,7 @@ class GlutenKeyGroupedPartitioningSuite
override def sparkConf: SparkConf = {
// Native SQL configs
super.sparkConf
- .set("spark.gluten.sql.columnar.forceShuffledHashJoin", "false")
+ .set(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key, "false")
.set("spark.sql.adaptive.enabled", "false")
.set("spark.sql.shuffle.partitions", "5")
}
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
index f51f2721f4..4ca6ce7b04 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
@@ -36,7 +36,7 @@ import org.apache.spark.sql.catalyst.rules.Rule
class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
import FallbackStrategiesSuite._
testGluten("Fall back the whole query if one unsupported") {
- withSQLConf(("spark.gluten.sql.columnar.query.fallback.threshold", "1")) {
+ withSQLConf((GlutenConfig.COLUMNAR_QUERY_FALLBACK_THRESHOLD.key, "1")) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
spark,
@@ -53,7 +53,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
}
testGluten("Fall back the whole plan if meeting the configured threshold") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"1")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"1")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -72,7 +72,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
}
testGluten("Don't fall back the whole plan if NOT meeting the configured
threshold") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"4")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"4")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -93,7 +93,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
testGluten(
"Fall back the whole plan if meeting the configured threshold (leaf node
is" +
" transformable)") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"2")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"2")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -114,7 +114,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
testGluten(
"Don't Fall back the whole plan if NOT meeting the configured threshold ("
+
"leaf node is transformable)") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"3")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"3")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
index e6e8124e02..f4cb9ad9f6 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
@@ -79,7 +79,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
if (BackendTestUtils.isCHBackendLoaded()) {
conf
.set("spark.io.compression.codec", "LZ4")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
.set("spark.gluten.sql.columnar.separate.scan.rdd.for.ch", "false")
.set(
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetRowIndexSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetRowIndexSuite.scala
index 7a75977bf9..6efa473540 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetRowIndexSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetRowIndexSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution.datasources.parquet
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.{BatchScanExecTransformer, FileSourceScanExecTransformer}
import org.apache.spark.sql.GlutenSQLTestsBaseTrait
@@ -166,7 +167,7 @@ class GlutenParquetRowIndexSuite extends ParquetRowIndexSuite with GlutenSQLTest
def sqlConfs: Seq[(String, String)] = Seq(
// TODO: remove this change after customized parquet options as `block_size`, `page_size`
// been fully supported.
- "spark.gluten.sql.native.writer.enabled" -> "false",
+ GlutenConfig.NATIVE_WRITER_ENABLED.key -> "false",
SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> useVectorizedReader.toString,
SQLConf.FILES_MAX_PARTITION_BYTES.key -> filesMaxPartitionBytes.toString
) ++ { if (useDataSourceV2) Seq(SQLConf.USE_V1_SOURCE_LIST.key -> "") else Seq.empty }
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
index 6543160f1a..f7d31aa031 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution.joins
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.utils.BackendTestUtils
import org.apache.spark.sql.{GlutenTestsCommonTrait, SparkSession}
@@ -61,7 +62,7 @@ class GlutenBroadcastJoinSuite extends BroadcastJoinSuite with GlutenTestsCommon
sparkBuilder
.config("spark.io.compression.codec", "LZ4")
.config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.sql.files.openCostInBytes", "134217728")
.config("spark.unsafe.exceptionOnMemoryLeak", "true")
.getOrCreate()
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
index a8eb2361ab..1d25f2a40e 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
@@ -27,10 +27,10 @@ class GlutenCustomerExtensionSuite extends GlutenSQLTestsTrait {
super.sparkConf
.set("spark.sql.adaptive.enabled", "false")
.set(
- "spark.gluten.sql.columnar.extended.columnar.pre.rules",
+ GlutenConfig.EXTENDED_COLUMNAR_TRANSFORM_RULES.key,
"org.apache.spark.sql" +
".extension.CustomerColumnarPreRules")
- .set("spark.gluten.sql.columnar.extended.columnar.post.rules", "")
+ .set(GlutenConfig.EXTENDED_COLUMNAR_POST_RULES.key, "")
}
testGluten("test customer column rules") {
@@ -39,7 +39,7 @@ class GlutenCustomerExtensionSuite extends GlutenSQLTestsTrait {
sql("insert into my_parquet values (1)")
sql("insert into my_parquet values (2)")
}
- withSQLConf(("spark.gluten.sql.columnar.filescan", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")) {
val df = sql("select * from my_parquet")
val testFileSourceScanExecTransformer = df.queryExecution.executedPlan.collect {
case f: TestFileSourceScanExecTransformer => f
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala
index e53e28fa27..058a63a67d 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala
@@ -38,7 +38,7 @@ class GlutenFallbackSuite extends GlutenSQLTestsTrait with AdaptiveSparkPlanHelp
override def sparkConf: SparkConf = {
super.sparkConf
.set(GlutenConfig.RAS_ENABLED.key, "false")
- .set("spark.gluten.ui.enabled", "true")
+ .set(GlutenConfig.GLUTEN_UI_ENABLED.key, "true")
// The gluten ui event test suite expects the spark ui to be enabled
.set(UI_ENABLED, true)
}
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
index 859e6df484..264a7e7836 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql.hive.execution
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.TableIdentifier
@@ -25,8 +27,8 @@ class GlutenHiveSQLQueryCHSuite extends GlutenHiveSQLQuerySuiteBase {
override def sparkConf: SparkConf = {
defaultSparkConf
.set("spark.plugins", "org.apache.gluten.GlutenPlugin")
- .set("spark.gluten.sql.enable.native.validation", "false")
- .set("spark.gluten.sql.native.writer.enabled", "true")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
+ .set(GlutenConfig.NATIVE_WRITER_ENABLED.key, "true")
.set("spark.sql.storeAssignmentPolicy", "legacy")
.set("spark.default.parallelism", "1")
.set("spark.memory.offHeap.enabled", "true")
diff --git a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
index f45b5c659c..5bf53e66e6 100644
--- a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
+++ b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.statistics
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.GlutenPlan
import org.apache.gluten.utils.BackendTestUtils
@@ -60,7 +61,7 @@ class SparkFunctionStatistics extends QueryTest {
sparkBuilder
.config("spark.io.compression.codec", "LZ4")
.config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.sql.files.openCostInBytes", "134217728")
.config("spark.unsafe.exceptionOnMemoryLeak", "true")
.getOrCreate()
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
index dbc70bf745..f4f85825c8 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenColumnExpressionSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkException
import org.apache.spark.sql.execution.ProjectExec
import org.apache.spark.sql.functions.{assert_true, expr, input_file_name, lit, raise_error}
@@ -73,7 +75,7 @@ class GlutenColumnExpressionSuite extends ColumnExpressionSuite with GlutenSQLTe
testGluten(
"input_file_name, input_file_block_start and input_file_block_length " +
"should fall back if scan falls back") {
- withSQLConf(("spark.gluten.sql.columnar.filescan", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")) {
withTempPath {
dir =>
val data = sparkContext.parallelize(0 to 10).toDF("id")
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
index cba70c21f8..2f3777caa1 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenDataFrameAggregateSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.HashAggregateExecBaseTransformer
import org.apache.spark.sql.execution.WholeStageCodegenExec
@@ -187,7 +188,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
// This test is applicable to the Velox backend. For the CH backend, the replacement is disabled.
testGluten("use gluten hash agg to replace vanilla spark sort agg") {
- withSQLConf(("spark.gluten.sql.columnar.force.hashagg", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FORCE_HASHAGG_ENABLED.key, "false")) {
Seq("A", "B", "C", "D").toDF("col1").createOrReplaceTempView("t1")
// SortAggregateExec is expected to be used for string type input.
val df = spark.sql("select max(col1) from t1")
@@ -195,7 +196,7 @@ class GlutenDataFrameAggregateSuite extends DataFrameAggregateSuite with GlutenS
assert(find(df.queryExecution.executedPlan)(_.isInstanceOf[SortAggregateExec]).isDefined)
}
- withSQLConf(("spark.gluten.sql.columnar.force.hashagg", "true")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FORCE_HASHAGG_ENABLED.key, "true")) {
Seq("A", "B", "C", "D").toDF("col1").createOrReplaceTempView("t1")
val df = spark.sql("select max(col1) from t1")
checkAnswer(df, Row("D") :: Nil)
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
index cc2b0b62ca..9ae4a0f063 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenFileBasedDataSourceSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.BatchScanExecTransformer
import org.apache.spark.{SparkConf, SparkException}
@@ -39,7 +40,7 @@ class GlutenFileBasedDataSourceSuite extends FileBasedDataSourceSuite with Glute
override def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.sql.columnar.forceShuffledHashJoin", "false")
+ .set(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key, "false")
.set(SQLConf.SHUFFLE_PARTITIONS.key, "5")
}
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
index 6c439d756c..45a76e4d81 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.exception.GlutenException
import org.apache.gluten.utils.{BackendTestSettings, BackendTestUtils}
@@ -120,7 +121,7 @@ class GlutenSQLQueryTestSuite
conf
.set("spark.io.compression.codec", "LZ4")
.set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.files.openCostInBytes", "134217728")
.set("spark.unsafe.exceptionOnMemoryLeak", "true")
} else {
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala
index 6432ba1e67..58ff90c760 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/connector/GlutenKeyGroupedPartitioningSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.connector
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.SortMergeJoinExecTransformer
import org.apache.spark.SparkConf
@@ -38,7 +39,7 @@ class GlutenKeyGroupedPartitioningSuite
override def sparkConf: SparkConf = {
// Native SQL configs
super.sparkConf
- .set("spark.gluten.sql.columnar.forceShuffledHashJoin", "false")
+ .set(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key, "false")
.set("spark.sql.adaptive.enabled", "false")
.set("spark.sql.shuffle.partitions", "5")
}
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
index 1dd7eccc21..31b622e51c 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/FallbackStrategiesSuite.scala
@@ -37,7 +37,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
import FallbackStrategiesSuite._
testGluten("Fall back the whole query if one unsupported") {
- withSQLConf(("spark.gluten.sql.columnar.query.fallback.threshold", "1")) {
+ withSQLConf((GlutenConfig.COLUMNAR_QUERY_FALLBACK_THRESHOLD.key, "1")) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
spark,
@@ -54,7 +54,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
}
testGluten("Fall back the whole plan if meeting the configured threshold") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"1")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"1")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -73,7 +73,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
}
testGluten("Don't fall back the whole plan if NOT meeting the configured
threshold") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"4")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"4")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -94,7 +94,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
testGluten(
"Fall back the whole plan if meeting the configured threshold (leaf node
is" +
" transformable)") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"2")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"2")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
@@ -115,7 +115,7 @@ class FallbackStrategiesSuite extends GlutenSQLTestsTrait {
testGluten(
"Don't Fall back the whole plan if NOT meeting the configured threshold ("
+
"leaf node is transformable)") {
- withSQLConf(("spark.gluten.sql.columnar.wholeStage.fallback.threshold",
"3")) {
+ withSQLConf((GlutenConfig.COLUMNAR_WHOLESTAGE_FALLBACK_THRESHOLD.key,
"3")) {
CallerInfo.withLocalValue(isAqe = true, isCache = false) {
val originalPlan = UnaryOp2(UnaryOp1(UnaryOp2(UnaryOp1(LeafOp()))))
val rule = newRuleApplier(
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
index 8fc9ba8c9c..12cc721154 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
@@ -78,7 +78,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
if (BackendTestUtils.isCHBackendLoaded()) {
conf
.set("spark.io.compression.codec", "LZ4")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
.set("spark.gluten.sql.columnar.separate.scan.rdd.for.ch", "false")
.set(
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetRowIndexSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetRowIndexSuite.scala
index 4c53396792..5cf41b7a9e 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetRowIndexSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/GlutenParquetRowIndexSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution.datasources.parquet
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.{BatchScanExecTransformer, FileSourceScanExecTransformer}
import org.apache.spark.sql.GlutenSQLTestsBaseTrait
@@ -167,7 +168,7 @@ class GlutenParquetRowIndexSuite extends ParquetRowIndexSuite with GlutenSQLTest
def sqlConfs: Seq[(String, String)] = Seq(
// TODO: remove this change after customized parquet options such as `block_size` and
// `page_size` have been fully supported.
- "spark.gluten.sql.native.writer.enabled" -> "false",
+ GlutenConfig.NATIVE_WRITER_ENABLED.key -> "false",
SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> useVectorizedReader.toString,
SQLConf.FILES_MAX_PARTITION_BYTES.key -> filesMaxPartitionBytes.toString
) ++ { if (useDataSourceV2) Seq(SQLConf.USE_V1_SOURCE_LIST.key -> "") else Seq.empty }
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
index c8b371f48f..c7aaf9ec5c 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution.joins
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.utils.BackendTestUtils
import org.apache.spark.sql.{GlutenTestsCommonTrait, SparkSession}
@@ -63,7 +64,7 @@ class GlutenBroadcastJoinSuite extends BroadcastJoinSuite with GlutenTestsCommon
sparkBuilder
.config("spark.io.compression.codec", "LZ4")
.config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.sql.files.openCostInBytes", "134217728")
.config("spark.unsafe.exceptionOnMemoryLeak", "true")
.getOrCreate()
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
index 739343587e..6dc265bc4f 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/extension/GlutenCustomerExtensionSuite.scala
@@ -27,10 +27,10 @@ class GlutenCustomerExtensionSuite extends GlutenSQLTestsTrait {
super.sparkConf
.set("spark.sql.adaptive.enabled", "false")
.set(
- "spark.gluten.sql.columnar.extended.columnar.pre.rules",
+ GlutenConfig.EXTENDED_COLUMNAR_TRANSFORM_RULES.key,
"org.apache.spark.sql" +
".extension.CustomerColumnarPreRules")
- .set("spark.gluten.sql.columnar.extended.columnar.post.rules", "")
+ .set(GlutenConfig.EXTENDED_COLUMNAR_POST_RULES.key, "")
}
testGluten("test customer column rules") {
@@ -39,7 +39,7 @@ class GlutenCustomerExtensionSuite extends GlutenSQLTestsTrait {
sql("insert into my_parquet values (1)")
sql("insert into my_parquet values (2)")
}
- withSQLConf(("spark.gluten.sql.columnar.filescan", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")) {
val df = sql("select * from my_parquet")
val testFileSourceScanExecTransformer =
df.queryExecution.executedPlan.collect {
case f: TestFileSourceScanExecTransformer => f
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala
index e53e28fa27..058a63a67d 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/gluten/GlutenFallbackSuite.scala
@@ -38,7 +38,7 @@ class GlutenFallbackSuite extends GlutenSQLTestsTrait with AdaptiveSparkPlanHelp
override def sparkConf: SparkConf = {
super.sparkConf
.set(GlutenConfig.RAS_ENABLED.key, "false")
- .set("spark.gluten.ui.enabled", "true")
+ .set(GlutenConfig.GLUTEN_UI_ENABLED.key, "true")
// The gluten ui event test suite expects the spark ui to be enabled
.set(UI_ENABLED, true)
}
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
index 859e6df484..264a7e7836 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql.hive.execution
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.{DebugFilesystem, SparkConf}
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.TableIdentifier
@@ -25,8 +27,8 @@ class GlutenHiveSQLQueryCHSuite extends GlutenHiveSQLQuerySuiteBase {
override def sparkConf: SparkConf = {
defaultSparkConf
.set("spark.plugins", "org.apache.gluten.GlutenPlugin")
- .set("spark.gluten.sql.enable.native.validation", "false")
- .set("spark.gluten.sql.native.writer.enabled", "true")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
+ .set(GlutenConfig.NATIVE_WRITER_ENABLED.key, "true")
.set("spark.sql.storeAssignmentPolicy", "legacy")
.set("spark.default.parallelism", "1")
.set("spark.memory.offHeap.enabled", "true")
diff --git a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
index f45b5c659c..5bf53e66e6 100644
--- a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
+++ b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.statistics
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.GlutenPlan
import org.apache.gluten.utils.BackendTestUtils
@@ -60,7 +61,7 @@ class SparkFunctionStatistics extends QueryTest {
sparkBuilder
.config("spark.io.compression.codec", "LZ4")
.config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.sql.files.openCostInBytes", "134217728")
.config("spark.unsafe.exceptionOnMemoryLeak", "true")
.getOrCreate()
diff --git a/gluten-ut/test/src/test/scala/org/apache/gluten/execution/MergeTwoPhasesHashBaseAggregateSuite.scala b/gluten-ut/test/src/test/scala/org/apache/gluten/execution/MergeTwoPhasesHashBaseAggregateSuite.scala
index 061f5660e3..d3ca2acdd6 100644
--- a/gluten-ut/test/src/test/scala/org/apache/gluten/execution/MergeTwoPhasesHashBaseAggregateSuite.scala
+++ b/gluten-ut/test/src/test/scala/org/apache/gluten/execution/MergeTwoPhasesHashBaseAggregateSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.utils.BackendTestUtils
import org.apache.spark.SparkConf
@@ -52,10 +53,10 @@ abstract class BaseMergeTwoPhasesHashBaseAggregateSuite extends WholeStageTransf
.set("spark.sql.files.maxPartitionBytes", "1g")
.set("spark.sql.shuffle.partitions", "1")
.set("spark.memory.offHeap.size", "2g")
- .set("spark.gluten.sql.mergeTwoPhasesAggregate.enabled", "true")
+ .set(GlutenConfig.MERGE_TWO_PHASES_ENABLED.key, "true")
if (BackendTestUtils.isCHBackendLoaded()) {
conf
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
}
conf
}
diff --git a/gluten-ut/test/src/test/scala/org/apache/gluten/expressions/GlutenExpressionMappingSuite.scala b/gluten-ut/test/src/test/scala/org/apache/gluten/expressions/GlutenExpressionMappingSuite.scala
index 6841a87d13..cda49a7c63 100644
--- a/gluten-ut/test/src/test/scala/org/apache/gluten/expressions/GlutenExpressionMappingSuite.scala
+++ b/gluten-ut/test/src/test/scala/org/apache/gluten/expressions/GlutenExpressionMappingSuite.scala
@@ -39,10 +39,10 @@ class GlutenExpressionMappingSuite
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "1024MB")
.set("spark.ui.enabled", "false")
- .set("spark.gluten.ui.enabled", "false")
+ .set(GlutenConfig.GLUTEN_UI_ENABLED.key, "false")
if (BackendTestUtils.isCHBackendLoaded()) {
conf
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
}
conf
}
diff --git a/gluten-ut/test/src/test/scala/org/apache/gluten/extension/GlutenExtensionRewriteRuleSuite.scala b/gluten-ut/test/src/test/scala/org/apache/gluten/extension/GlutenExtensionRewriteRuleSuite.scala
index 837d37236b..f8a40ff6cd 100644
--- a/gluten-ut/test/src/test/scala/org/apache/gluten/extension/GlutenExtensionRewriteRuleSuite.scala
+++ b/gluten-ut/test/src/test/scala/org/apache/gluten/extension/GlutenExtensionRewriteRuleSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.extension
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.{HashAggregateExecBaseTransformer, ProjectExecTransformer, WholeStageTransformerSuite}
import org.apache.gluten.utils.BackendTestUtils
@@ -31,7 +32,7 @@ class GlutenExtensionRewriteRuleSuite extends WholeStageTransformerSuite {
.set("spark.sql.adaptive.enabled", "false")
if (BackendTestUtils.isCHBackendLoaded()) {
conf
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
}
conf
}
diff --git a/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala b/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala
index 0e997a4b01..e5f263f423 100644
--- a/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala
+++ b/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala
@@ -35,10 +35,10 @@ class SQLQuerySuite extends WholeStageTransformerSuite {
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "1024MB")
.set("spark.ui.enabled", "false")
- .set("spark.gluten.ui.enabled", "false")
+ .set(GlutenConfig.GLUTEN_UI_ENABLED.key, "false")
if (BackendTestUtils.isCHBackendLoaded()) {
conf
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
}
conf
}
diff --git a/gluten-ut/test/src/test/scala/org/apache/spark/sql/GlutenExpressionDataTypesValidation.scala b/gluten-ut/test/src/test/scala/org/apache/spark/sql/GlutenExpressionDataTypesValidation.scala
index 2a47c0e9e0..fde2e59b53 100644
--- a/gluten-ut/test/src/test/scala/org/apache/spark/sql/GlutenExpressionDataTypesValidation.scala
+++ b/gluten-ut/test/src/test/scala/org/apache/spark/sql/GlutenExpressionDataTypesValidation.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.{ProjectExecTransformer, TransformSupport, WholeStageTransformerSuite}
import org.apache.gluten.utils.BackendTestUtils
@@ -40,10 +41,10 @@ class GlutenExpressionDataTypesValidation extends WholeStageTransformerSuite {
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "1024MB")
.set("spark.ui.enabled", "false")
- .set("spark.gluten.ui.enabled", "false")
+ .set(GlutenConfig.GLUTEN_UI_ENABLED.key, "false")
if (BackendTestUtils.isCHBackendLoaded()) {
conf
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
}
conf
}
diff --git a/gluten-ut/test/src/test/scala/org/apache/spark/sql/datasources/GlutenNoopWriterRuleSuite.scala b/gluten-ut/test/src/test/scala/org/apache/spark/sql/datasources/GlutenNoopWriterRuleSuite.scala
index 84d6ccd585..a92b563789 100644
--- a/gluten-ut/test/src/test/scala/org/apache/spark/sql/datasources/GlutenNoopWriterRuleSuite.scala
+++ b/gluten-ut/test/src/test/scala/org/apache/spark/sql/datasources/GlutenNoopWriterRuleSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.datasources
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.ColumnarToCarrierRowExecBase
import org.apache.spark.SparkConf
@@ -33,7 +34,7 @@ class GlutenNoopWriterRuleSuite extends GlutenQueryTest with SharedSparkSession
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "1024MB")
.set("spark.ui.enabled", "false")
- .set("spark.gluten.ui.enabled", "false")
+ .set(GlutenConfig.GLUTEN_UI_ENABLED.key, "false")
}
class WriterColumnarListener extends QueryExecutionListener {
diff --git a/gluten-ut/test/src/test/scala/org/apache/spark/sql/execution/GlutenSQLRangeExecSuite.scala b/gluten-ut/test/src/test/scala/org/apache/spark/sql/execution/GlutenSQLRangeExecSuite.scala
index 3532485380..03663bc156 100644
--- a/gluten-ut/test/src/test/scala/org/apache/spark/sql/execution/GlutenSQLRangeExecSuite.scala
+++ b/gluten-ut/test/src/test/scala/org/apache/spark/sql/execution/GlutenSQLRangeExecSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.ColumnarRangeBaseExec
import org.apache.gluten.utils.BackendTestUtils
@@ -33,10 +34,10 @@ class GlutenSQLRangeExecSuite extends GlutenQueryTest with SharedSparkSession {
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "1024MB")
.set("spark.ui.enabled", "false")
- .set("spark.gluten.ui.enabled", "false")
+ .set(GlutenConfig.GLUTEN_UI_ENABLED.key, "false")
if (BackendTestUtils.isCHBackendLoaded()) {
conf
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
}
conf
}
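
Note for anyone applying the same cleanup in new suites: every hunk above follows one pattern. Replace the hard-coded config string with the `.key` of the corresponding GlutenConfig entry, so a renamed or misspelled config surfaces as a compile error instead of a silently ignored setting, and the key, default value, and documentation stay defined in one place. A minimal sketch of the before/after shape follows; the suite and test names are hypothetical, and it assumes GlutenSQLTestsTrait provides testGluten, withSQLConf, and sql as in the suites changed above.

import org.apache.gluten.config.GlutenConfig
import org.apache.spark.sql.GlutenSQLTestsTrait

// Hypothetical suite, for illustration only.
class ConfigKeyExampleSuite extends GlutenSQLTestsTrait {

  testGluten("prefer ConfigEntry keys over hard-coded strings") {
    // Before: the key is an unchecked string literal.
    //   withSQLConf(("spark.gluten.sql.columnar.filescan", "false")) { ... }
    // After: the key is resolved from the ConfigEntry, scoped to this block.
    withSQLConf((GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")) {
      val df = sql("select * from range(10)")
      assert(df.count() == 10)
    }
  }
}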
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]