This is an automated email from the ASF dual-hosted git repository.
yuanzhou pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git
The following commit(s) were added to refs/heads/main by this push:
new 34bae918ab [MINOR] Import config keys instead of hard-coding string
values for tests in backends (#9855)
34bae918ab is described below
commit 34bae918ab81ff2fd0a11fd6d7c0f737eca171b2
Author: Yongkyun Lee <[email protected]>
AuthorDate: Mon Jul 21 13:18:58 2025 -0700
[MINOR] Import config keys instead of hard-coding string values for tests
in backends (#9855)
Follow-up to #9834.
This PR updates tests in the two backend modules to import configs rather
than hard-coding the string constants of the configs.
---
...ickHouseRSSColumnarMemorySortShuffleSuite.scala | 4 ++-
.../ClickHouseIcebergHiveTableSupport.scala | 9 ++++--
.../kafka/ClickhouseGlutenKafkaScanSuite.scala | 2 +-
.../gluten/backendsapi/clickhouse/CHBackend.scala | 2 +-
.../test/scala/org/apache/gluten/RunTPCHTest.scala | 2 +-
.../GlutenClickHouseExcelFormatSuite.scala | 2 +-
.../execution/GlutenClickHouseJoinSuite.scala | 3 +-
.../execution/GlutenClickHouseNativeLibSuite.scala | 2 +-
.../GlutenClickHouseSyntheticDataSuite.scala | 4 ++-
.../GlutenClickHouseTPCDSAbstractSuite.scala | 3 +-
.../GlutenClickHouseTPCHAbstractSuite.scala | 3 +-
...lutenClickHouseWholeStageTransformerSuite.scala | 3 +-
.../GlutenClickhouseCountDistinctSuite.scala | 3 +-
.../GlutenCoalesceAggregationUnionSuite.scala | 3 +-
.../execution/GlutenEliminateJoinSuite.scala | 5 ++--
.../execution/GlutenFunctionValidateSuite.scala | 11 +++----
.../gluten/execution/GlutenNothingValueCheck.scala | 6 ++--
.../GlutenClickhouseFunctionSuite.scala | 2 +-
.../extension/GlutenCustomAggExpressionSuite.scala | 3 +-
.../hive/GlutenClickHouseHiveTableSuite.scala | 7 +++--
.../GlutenClickHouseNativeWriteTableSuite.scala | 2 +-
.../GlutenClickHouseMergeTreeWriteSuite.scala | 5 ++--
.../metrics/GlutenClickHouseTPCHMetricsSuite.scala | 3 +-
...kHouseTPCDSParquetColumnarShuffleAQESuite.scala | 5 ++--
.../GlutenClickHouseTPCDSParquetRFSuite.scala | 5 ++--
...nClickHouseTPCDSParquetSortMergeJoinSuite.scala | 3 +-
.../tpcds/GlutenClickHouseTPCDSParquetSuite.scala | 5 ++--
...ckHouseTPCHColumnarShuffleParquetAQESuite.scala | 2 +-
.../tpch/GlutenClickHouseTPCHParquetAQESuite.scala | 5 ++--
...nClickHouseTPCHSaltNullNativeParquetSuite.scala | 3 +-
.../GlutenClickHouseTPCHSaltNullParquetSuite.scala | 6 ++--
.../scala/org/apache/gluten/s3/S3AuthSuite.scala | 10 +++----
.../benchmarks/CHAggAndShuffleBenchmark.scala | 3 +-
.../execution/benchmarks/CHSqlBasedBenchmark.scala | 4 ++-
...ckhouseCustomerExpressionTransformerSuite.scala | 3 +-
.../main/java/org/apache/gluten/TestConfUtil.java | 15 ++++++----
.../AutoAdjustStageResourceProfileSuite.scala | 2 +-
.../execution/DynamicOffHeapSizingSuite.scala | 5 ++--
.../apache/gluten/execution/FallbackSuite.scala | 2 +-
.../gluten/execution/MiscOperatorSuite.scala | 35 +++++++++++-----------
.../execution/VeloxAggregateFunctionsSuite.scala | 12 ++++----
.../gluten/execution/VeloxHashJoinSuite.scala | 5 ++--
.../gluten/execution/VeloxMetricsSuite.scala | 4 +--
.../VeloxOrcDataTypeValidationSuite.scala | 8 +++--
.../VeloxParquetDataTypeValidationSuite.scala | 8 +++--
.../apache/gluten/execution/VeloxTPCDSSuite.scala | 6 ++--
.../apache/gluten/execution/VeloxTPCHSuite.scala | 18 ++++++-----
.../gluten/expression/UDFPartialProjectSuite.scala | 9 +++---
.../apache/gluten/expression/VeloxUdfSuite.scala | 7 +++--
.../functions/DateFunctionsValidateSuite.scala | 5 ++--
.../functions/MathFunctionsValidateSuite.scala | 5 ++--
.../functions/ScalarFunctionsValidateSuite.scala | 4 +--
.../apache/gluten/fuzzer/RowToColumnarFuzzer.scala | 3 +-
.../spark/sql/execution/GlutenHiveUDFSuite.scala | 3 +-
.../execution/VeloxParquetWriteForHiveSuite.scala | 7 +++--
.../sql/execution/VeloxParquetWriteSuite.scala | 3 +-
.../execution/benchmark/VeloxRasBenchmark.scala | 2 +-
.../org/apache/gluten/config/GlutenConfig.scala | 4 +--
.../execution/WholeStageTransformerSuite.scala | 2 +-
59 files changed, 184 insertions(+), 133 deletions(-)
diff --git
a/backends-clickhouse/src-celeborn/test/scala/org/apache/gluten/execution/GlutenClickHouseRSSColumnarMemorySortShuffleSuite.scala
b/backends-clickhouse/src-celeborn/test/scala/org/apache/gluten/execution/GlutenClickHouseRSSColumnarMemorySortShuffleSuite.scala
index fc5a929e68..456dd72461 100644
---
a/backends-clickhouse/src-celeborn/test/scala/org/apache/gluten/execution/GlutenClickHouseRSSColumnarMemorySortShuffleSuite.scala
+++
b/backends-clickhouse/src-celeborn/test/scala/org/apache/gluten/execution/GlutenClickHouseRSSColumnarMemorySortShuffleSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.gluten.execution
+import org.apache.gluten.backendsapi.clickhouse.CHConfig
+
import org.apache.spark.SparkConf
class GlutenClickHouseRSSColumnarMemorySortShuffleSuite extends MergeTreeSuite
{
@@ -32,7 +34,7 @@ class GlutenClickHouseRSSColumnarMemorySortShuffleSuite
extends MergeTreeSuite
.set("spark.sql.adaptive.enabled", "true")
.set("spark.shuffle.service.enabled", "false")
.set("spark.celeborn.client.spark.shuffle.writer", "hash")
- .set("spark.gluten.sql.columnar.backend.ch.forceMemorySortShuffle",
"true")
+ .set(CHConfig.COLUMNAR_CH_FORCE_MEMORY_SORT_SHUFFLE.key, "true")
}
final override val testCases: Seq[Int] = Seq(
diff --git
a/backends-clickhouse/src-iceberg/test/scala/org/apache/gluten/execution/iceberg/ClickHouseIcebergHiveTableSupport.scala
b/backends-clickhouse/src-iceberg/test/scala/org/apache/gluten/execution/iceberg/ClickHouseIcebergHiveTableSupport.scala
index c9e6a8ee5e..f17274cf98 100644
---
a/backends-clickhouse/src-iceberg/test/scala/org/apache/gluten/execution/iceberg/ClickHouseIcebergHiveTableSupport.scala
+++
b/backends-clickhouse/src-iceberg/test/scala/org/apache/gluten/execution/iceberg/ClickHouseIcebergHiveTableSupport.scala
@@ -18,6 +18,9 @@
package org.apache.gluten.execution.iceberg
import com.google.common.base.Strings
+
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import
org.apache.spark.sql.execution.datasources.v2.clickhouse.ClickHouseConfig
@@ -47,10 +50,10 @@ class ClickHouseIcebergHiveTableSupport {
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
- .set("spark.gluten.sql.parquet.maxmin.index", "true")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
+ .set(GlutenConfig.ENABLE_PARQUET_ROW_GROUP_MAX_MIN_INDEX.key, "true")
.set("spark.hive.exec.dynamic.partition.mode", "nonstrict")
- .set("spark.gluten.supported.hive.udfs", "my_add")
+ .set(GlutenConfig.GLUTEN_SUPPORTED_HIVE_UDFS.key, "my_add")
.set("spark.shuffle.manager",
"org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.sql.adaptive.enabled", "true")
.set("spark.sql.shuffle.partitions", "2")
diff --git
a/backends-clickhouse/src-kafka/test/scala/org/apache/gluten/execution/kafka/ClickhouseGlutenKafkaScanSuite.scala
b/backends-clickhouse/src-kafka/test/scala/org/apache/gluten/execution/kafka/ClickhouseGlutenKafkaScanSuite.scala
index 67b85e274e..cc07de8ce1 100644
---
a/backends-clickhouse/src-kafka/test/scala/org/apache/gluten/execution/kafka/ClickhouseGlutenKafkaScanSuite.scala
+++
b/backends-clickhouse/src-kafka/test/scala/org/apache/gluten/execution/kafka/ClickhouseGlutenKafkaScanSuite.scala
@@ -65,7 +65,7 @@ class ClickhouseGlutenKafkaScanSuite
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.warehouse.dir", warehouse)
}
diff --git
a/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHBackend.scala
b/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHBackend.scala
index c331c8badd..141cdaa24c 100644
---
a/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHBackend.scala
+++
b/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHBackend.scala
@@ -112,7 +112,7 @@ object CHBackendSettings extends BackendSettingsApi with
Logging {
private val GLUTEN_CLICKHOUSE_SHUFFLE_SUPPORTED_CODEC: Set[String] =
Set("lz4", "zstd", "snappy")
// The algorithm for hash partition of the shuffle
- private val GLUTEN_CLICKHOUSE_SHUFFLE_HASH_ALGORITHM: String =
+ val GLUTEN_CLICKHOUSE_SHUFFLE_HASH_ALGORITHM: String =
CHConfig.prefixOf("shuffle.hash.algorithm")
// valid values are: cityHash64 or sparkMurmurHash3_32
private val GLUTEN_CLICKHOUSE_SHUFFLE_HASH_ALGORITHM_DEFAULT =
"sparkMurmurHash3_32"
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/RunTPCHTest.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/RunTPCHTest.scala
index 1face5147f..f4a76691ed 100644
--- a/backends-clickhouse/src/test/scala/org/apache/gluten/RunTPCHTest.scala
+++ b/backends-clickhouse/src/test/scala/org/apache/gluten/RunTPCHTest.scala
@@ -97,7 +97,7 @@ object RunTPCHTest {
.config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
.config("spark.gluten.sql.columnar.iterator", "true")
.config("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.sql.columnVector.offheap.enabled", "true")
.config("spark.memory.offHeap.enabled", "true")
.config("spark.memory.offHeap.size", offHeapSize)
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseExcelFormatSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseExcelFormatSuite.scala
index 761d48b0ce..4606b36d18 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseExcelFormatSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseExcelFormatSuite.scala
@@ -1345,7 +1345,7 @@ class GlutenClickHouseExcelFormatSuite extends
GlutenClickHouseWholeStageTransfo
withSQLConf(
(CHConfig.runtimeSettings("use_excel_serialization"), "false"),
- ("spark.gluten.sql.text.input.empty.as.default", "true")) {
+ (GlutenConfig.TEXT_INPUT_EMPTY_AS_DEFAULT.key, "true")) {
compareResultsAgainstVanillaSpark(
"""
| select * from TEST_MEASURE
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseJoinSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseJoinSuite.scala
index ab92cc1b29..844b12def2 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseJoinSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseJoinSuite.scala
@@ -17,6 +17,7 @@
package org.apache.gluten.execution
import org.apache.gluten.backendsapi.clickhouse.CHConfig
+import org.apache.gluten.config.GlutenConfig
import org.apache.spark.SparkConf
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
@@ -38,7 +39,7 @@ class GlutenClickHouseJoinSuite extends
GlutenClickHouseWholeStageTransformerSui
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.warehouse.dir", warehouse)
.set(
"spark.sql.warehouse.dir",
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeLibSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeLibSuite.scala
index f91e841942..b35d119721 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeLibSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeLibSuite.scala
@@ -31,7 +31,7 @@ class GlutenClickHouseNativeLibSuite extends PlanTest {
.set("spark.default.parallelism", "1")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "1024MB")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
}
test("test columnar lib path not exist") {
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseSyntheticDataSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseSyntheticDataSuite.scala
index 95c05d93a8..b68409324b 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseSyntheticDataSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseSyntheticDataSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.gluten.execution
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.sql.Row
@@ -51,7 +53,7 @@ class GlutenClickHouseSyntheticDataSuite
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.warehouse.dir", warehouse)
.set("spark.sql.legacy.createHiveTableByDefault", "false")
.set("spark.shuffle.manager", "sort")
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCDSAbstractSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCDSAbstractSuite.scala
index df08be1ffc..08b84b7428 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCDSAbstractSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCDSAbstractSuite.scala
@@ -17,6 +17,7 @@
package org.apache.gluten.execution
import org.apache.gluten.benchmarks.GenTPCDSTableScripts
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.utils.{Arm, UTSystemParameters}
import org.apache.spark.SparkConf
@@ -133,7 +134,7 @@ abstract class GlutenClickHouseTPCDSAbstractSuite
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.warehouse.dir", warehouse)
/* .set("spark.sql.catalogImplementation", "hive")
.set("javax.jdo.option.ConnectionURL", s"jdbc:derby:;databaseName=${
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCHAbstractSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCHAbstractSuite.scala
index b3530811e4..e97e32252a 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCHAbstractSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCHAbstractSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.test.TPCHCHSchema
import org.apache.spark.{SparkConf, SparkEnv}
@@ -292,7 +293,7 @@ abstract class GlutenClickHouseTPCHAbstractSuite
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.warehouse.dir", warehouse)
}
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseWholeStageTransformerSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseWholeStageTransformerSuite.scala
index af33880e27..2a3ccc751e 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseWholeStageTransformerSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseWholeStageTransformerSuite.scala
@@ -17,6 +17,7 @@
package org.apache.gluten.execution
import org.apache.gluten.backendsapi.clickhouse.RuntimeConfig
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.utils.{HDFSTestHelper, MinioTestHelper,
UTSystemParameters}
import org.apache.spark.{SPARK_VERSION_SHORT, SparkConf}
@@ -84,7 +85,7 @@ class GlutenClickHouseWholeStageTransformerSuite
import org.apache.gluten.backendsapi.clickhouse.CHConfig._
val conf = super.sparkConf
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.warehouse.dir", warehouse)
.setCHConfig("user_defined_path", "/tmp/user_defined")
.set(RuntimeConfig.PATH.key, UTSystemParameters.diskOutputDataPath)
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickhouseCountDistinctSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickhouseCountDistinctSuite.scala
index 694b578c91..bad39ef09b 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickhouseCountDistinctSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickhouseCountDistinctSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.test.AllDataTypesWithComplexType
import org.apache.gluten.test.AllDataTypesWithComplexType.genTestData
@@ -127,7 +128,7 @@ class GlutenClickhouseCountDistinctSuite extends
GlutenClickHouseWholeStageTrans
"Gluten-5618: [CH] Fix 'Position x is out of bound in Block' error " +
"when executing count distinct") {
- withSQLConf(("spark.gluten.sql.countDistinctWithoutExpand", "false")) {
+ withSQLConf((GlutenConfig.ENABLE_COUNT_DISTINCT_WITHOUT_EXPAND.key,
"false")) {
val sql =
"""
|select count(distinct a, b, c) from
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenCoalesceAggregationUnionSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenCoalesceAggregationUnionSuite.scala
index fdd7482757..5df4a41352 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenCoalesceAggregationUnionSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenCoalesceAggregationUnionSuite.scala
@@ -17,6 +17,7 @@
package org.apache.gluten.execution
import org.apache.gluten.backendsapi.clickhouse.CHBackendSettings
+import org.apache.gluten.config.GlutenConfig
import org.apache.spark.SparkConf
import org.apache.spark.sql.{DataFrame, Row}
@@ -43,7 +44,7 @@ class GlutenCoalesceAggregationUnionSuite extends
GlutenClickHouseWholeStageTran
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.warehouse.dir", warehouse)
.set("spark.shuffle.manager", "sort")
.set("spark.io.compression.codec", "snappy")
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenEliminateJoinSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenEliminateJoinSuite.scala
index 2f6f0023ef..923d6583a5 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenEliminateJoinSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenEliminateJoinSuite.scala
@@ -18,6 +18,7 @@ package org.apache.gluten.execution
import org.apache.gluten.backendsapi.clickhouse._
import org.apache.gluten.backendsapi.clickhouse.CHBackendSettings
+import org.apache.gluten.config.GlutenConfig
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
@@ -50,13 +51,13 @@ class GlutenEliminateJoinSuite extends
GlutenClickHouseWholeStageTransformerSuit
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.warehouse.dir", warehouse)
.set("spark.shuffle.manager", "sort")
.set("spark.io.compression.codec", "snappy")
.set("spark.sql.shuffle.partitions", "5")
.set("spark.sql.autoBroadcastJoinThreshold", "-1")
- .set("spark.gluten.supported.scala.udfs",
"compare_substrings:compare_substrings")
+ .set(GlutenConfig.GLUTEN_SUPPORTED_SCALA_UDFS.key,
"compare_substrings:compare_substrings")
.set(CHConfig.runtimeSettings("max_memory_usage_ratio_for_streaming_aggregating"),
"0.01")
.set(CHConfig.runtimeSettings("high_cardinality_threshold_for_streaming_aggregating"),
"0.2")
.set(CHBackendSettings.GLUTEN_JOIN_AGGREGATE_TO_AGGREGATE_UNION, "true")
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenFunctionValidateSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenFunctionValidateSuite.scala
index 10506cf9af..51ba9fd8d3 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenFunctionValidateSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenFunctionValidateSuite.scala
@@ -17,6 +17,7 @@
package org.apache.gluten.execution
import org.apache.gluten.backendsapi.clickhouse.CHConfig
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.expression.{FlattenedAnd, FlattenedOr}
import org.apache.spark.SparkConf
@@ -55,13 +56,13 @@ class GlutenFunctionValidateSuite extends
GlutenClickHouseWholeStageTransformerS
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.warehouse.dir", warehouse)
.set("spark.shuffle.manager", "sort")
.set("spark.io.compression.codec", "snappy")
.set("spark.sql.shuffle.partitions", "5")
.set("spark.sql.autoBroadcastJoinThreshold", "10MB")
- .set("spark.gluten.supported.scala.udfs",
"compare_substrings:compare_substrings")
+ .set(GlutenConfig.GLUTEN_SUPPORTED_SCALA_UDFS.key,
"compare_substrings:compare_substrings")
}
override def beforeAll(): Unit = {
@@ -300,7 +301,7 @@ class GlutenFunctionValidateSuite extends
GlutenClickHouseWholeStageTransformerS
checkPlan(df.queryExecution.analyzed, path)
}
- withSQLConf(("spark.gluten.sql.collapseGetJsonObject.enabled", "true")) {
+ withSQLConf((GlutenConfig.ENABLE_COLLAPSE_GET_JSON_OBJECT.key, "true")) {
runQueryAndCompare(
"select get_json_object(get_json_object(string_field1, '$.a'), '$.y')
" +
" from json_test where int_field1 = 6") {
@@ -752,7 +753,7 @@ class GlutenFunctionValidateSuite extends
GlutenClickHouseWholeStageTransformerS
}
}
- withSQLConf(("spark.gluten.sql.commonSubexpressionEliminate", "true")) {
+ withSQLConf((GlutenConfig.ENABLE_COMMON_SUBEXPRESSION_ELIMINATE.key,
"true")) {
// CSE in project
runQueryAndCompare("select hash(id), hash(id)+1, hash(id)-1 from
range(10)") {
df => checkOperatorCount[ProjectExecTransformer](2)(df)
@@ -857,7 +858,7 @@ class GlutenFunctionValidateSuite extends
GlutenClickHouseWholeStageTransformerS
test("avg(bigint) overflow") {
withSQLConf(
- "spark.gluten.sql.columnar.forceShuffledHashJoin" -> "false",
+ GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "false",
"spark.sql.autoBroadcastJoinThreshold" -> "-1") {
withTable("myitem") {
sql("create table big_int(id bigint) using parquet")
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenNothingValueCheck.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenNothingValueCheck.scala
index fd64d610a0..7bc1d2fbf9 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenNothingValueCheck.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenNothingValueCheck.scala
@@ -16,6 +16,8 @@
*/
package org.apache.gluten.execution
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.sql.Row
@@ -45,13 +47,13 @@ class GlutenNothingValueCheck extends
GlutenClickHouseWholeStageTransformerSuite
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.warehouse.dir", warehouse)
.set("spark.shuffle.manager", "sort")
.set("spark.io.compression.codec", "snappy")
.set("spark.sql.shuffle.partitions", "5")
.set("spark.sql.autoBroadcastJoinThreshold", "-1")
- .set("spark.gluten.supported.scala.udfs",
"compare_substrings:compare_substrings")
+ .set(GlutenConfig.GLUTEN_SUPPORTED_SCALA_UDFS.key,
"compare_substrings:compare_substrings")
.set(
SQLConf.OPTIMIZER_EXCLUDED_RULES.key,
ConstantFolding.ruleName + "," + NullPropagation.ruleName)
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/compatibility/GlutenClickhouseFunctionSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/compatibility/GlutenClickhouseFunctionSuite.scala
index 3ca3255382..ebf809c95a 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/compatibility/GlutenClickhouseFunctionSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/compatibility/GlutenClickhouseFunctionSuite.scala
@@ -47,7 +47,7 @@ class GlutenClickhouseFunctionSuite extends ParquetSuite {
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
// TODO: support default ANSI policy
.set("spark.sql.storeAssignmentPolicy", "legacy")
.set("spark.sql.warehouse.dir", warehouse)
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/extension/GlutenCustomAggExpressionSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/extension/GlutenCustomAggExpressionSuite.scala
index 1363591a02..900ab11ea2 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/extension/GlutenCustomAggExpressionSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/extension/GlutenCustomAggExpressionSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution.extension
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution._
import org.apache.gluten.substrait.SubstraitContext
import org.apache.gluten.utils.SubstraitPlanPrinterUtil
@@ -32,7 +33,7 @@ class GlutenCustomAggExpressionSuite extends ParquetSuite {
super.sparkConf
.set("spark.sql.adaptive.enabled", "false")
.set(
- "spark.gluten.sql.columnar.extended.expressions.transformer",
+ GlutenConfig.EXTENDED_EXPRESSION_TRAN_CONF.key,
"org.apache.gluten.execution.extension.CustomAggExpressionTransformer")
}
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseHiveTableSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseHiveTableSuite.scala
index f53fa397cb..7aeec48b1d 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseHiveTableSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseHiveTableSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution.hive
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.{FileSourceScanExecTransformer,
GlutenClickHouseWholeStageTransformerSuite, ProjectExecTransformer,
TransformSupport}
import org.apache.gluten.test.AllDataTypesWithComplexType
@@ -54,13 +55,13 @@ class GlutenClickHouseHiveTableSuite
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
- .set("spark.gluten.sql.parquet.maxmin.index", "true")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
+ .set(GlutenConfig.ENABLE_PARQUET_ROW_GROUP_MAX_MIN_INDEX.key, "true")
.set(
"spark.sql.warehouse.dir",
this.getClass.getResource("/").getPath +
"tests-working-home/spark-warehouse")
.set("spark.hive.exec.dynamic.partition.mode", "nonstrict")
- .set("spark.gluten.supported.hive.udfs", "my_add")
+ .set(GlutenConfig.GLUTEN_SUPPORTED_HIVE_UDFS.key, "my_add")
.setCHConfig("use_local_format", true)
.set("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension")
.set(
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseNativeWriteTableSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseNativeWriteTableSuite.scala
index 81bf35e2eb..9b08f56c66 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseNativeWriteTableSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseNativeWriteTableSuite.scala
@@ -59,7 +59,7 @@ class GlutenClickHouseNativeWriteTableSuite
.set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
.set("spark.gluten.sql.columnar.iterator", "true")
.set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
// TODO: support default ANSI policy
.set("spark.sql.storeAssignmentPolicy", "legacy")
.set("spark.sql.warehouse.dir", getWarehouseDir)
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/mergetree/GlutenClickHouseMergeTreeWriteSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/mergetree/GlutenClickHouseMergeTreeWriteSuite.scala
index 336c98333d..8ac4375846 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/mergetree/GlutenClickHouseMergeTreeWriteSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/mergetree/GlutenClickHouseMergeTreeWriteSuite.scala
@@ -16,7 +16,7 @@
*/
package org.apache.gluten.execution.mergetree
-import org.apache.gluten.backendsapi.clickhouse.{CHConfig, RuntimeSettings}
+import org.apache.gluten.backendsapi.clickhouse.{CHBackendSettings, CHConfig,
RuntimeSettings}
import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution._
import org.apache.gluten.utils.Arm
@@ -1873,8 +1873,7 @@ class GlutenClickHouseMergeTreeWriteSuite extends
CreateMergeTreeSuite {
|""".stripMargin)
Seq(("-1", 3), ("3", 3), ("6", 1)).foreach(
conf => {
- withSQLConf(
- "spark.gluten.sql.columnar.backend.ch.files.per.partition.threshold"
-> conf._1) {
+
withSQLConf(CHBackendSettings.GLUTEN_CLICKHOUSE_FILES_PER_PARTITION_THRESHOLD
-> conf._1) {
val sql =
s"""
|select count(1), min(l_returnflag) from lineitem_split
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/metrics/GlutenClickHouseTPCHMetricsSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/metrics/GlutenClickHouseTPCHMetricsSuite.scala
index 4a5a923162..f5a70731d5 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/metrics/GlutenClickHouseTPCHMetricsSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/metrics/GlutenClickHouseTPCHMetricsSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution.metrics
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution._
import org.apache.gluten.execution.GlutenPlan
@@ -116,7 +117,7 @@ class GlutenClickHouseTPCHMetricsSuite extends
ParquetTPCHSuite {
}
test("Check the metrics values") {
- withSQLConf(("spark.gluten.sql.columnar.sort", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_SORT_ENABLED.key, "false")) {
customCheck(1, native = false) {
df =>
val plans = df.queryExecution.executedPlan.collect {
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetColumnarShuffleAQESuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetColumnarShuffleAQESuite.scala
index e8fea04f2c..ac4640640f 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetColumnarShuffleAQESuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetColumnarShuffleAQESuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution.tpcds
+import org.apache.gluten.backendsapi.clickhouse.CHBackendSettings
import org.apache.gluten.execution._
import org.apache.spark.SparkConf
@@ -245,7 +246,7 @@ class GlutenClickHouseTPCDSParquetColumnarShuffleAQESuite
Seq(("-1", 8), ("100", 8), ("2000", 1)).foreach(
conf => {
withSQLConf(
-
("spark.gluten.sql.columnar.backend.ch.files.per.partition.threshold" ->
conf._1)) {
+ (CHBackendSettings.GLUTEN_CLICKHOUSE_FILES_PER_PARTITION_THRESHOLD
-> conf._1)) {
val sql =
s"""
|select count(1) from store_sales
@@ -268,7 +269,7 @@ class GlutenClickHouseTPCDSParquetColumnarShuffleAQESuite
test("GLUTEN-7971: Support using left side as the build table for the left
anti/semi join") {
withSQLConf(
("spark.sql.autoBroadcastJoinThreshold", "-1"),
- ("spark.gluten.sql.columnar.backend.ch.convert.left.anti_semi.to.right",
"true")) {
+ (CHBackendSettings.GLUTEN_CLICKHOUSE_CONVERT_LEFT_ANTI_SEMI_TO_RIGHT,
"true")) {
val sql1 =
s"""
|select
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetRFSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetRFSuite.scala
index 657a6e3214..553bb50e15 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetRFSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetRFSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution.tpcds
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.GlutenClickHouseTPCDSAbstractSuite
import org.apache.spark.SparkConf
@@ -30,8 +31,8 @@ class GlutenClickHouseTPCDSParquetRFSuite extends
GlutenClickHouseTPCDSAbstractS
.set("spark.sql.shuffle.partitions", "5")
.set("spark.sql.autoBroadcastJoinThreshold", "10MB")
.set("spark.memory.offHeap.size", "8g")
- .set("spark.gluten.sql.validation.logLevel", "ERROR")
- .set("spark.gluten.sql.validation.printStackOnFailure", "true")
+ .set(GlutenConfig.VALIDATION_LOG_LEVEL.key, "ERROR")
+ .set(GlutenConfig.VALIDATION_PRINT_FAILURE_STACK.key, "true")
// radically small threshold to force runtime bloom filter
.set("spark.sql.optimizer.runtime.bloomFilter.applicationSideScanSizeThreshold",
"1KB")
.set("spark.sql.optimizer.runtime.bloomFilter.enabled", "true")
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetSortMergeJoinSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetSortMergeJoinSuite.scala
index 585d43be89..cdb85c2941 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetSortMergeJoinSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetSortMergeJoinSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution.tpcds
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.{CHShuffledHashJoinExecTransformer,
CHSortMergeJoinExecTransformer, GlutenClickHouseTPCDSAbstractSuite}
import org.apache.gluten.test.FallbackUtil
@@ -52,7 +53,7 @@ class GlutenClickHouseTPCDSParquetSortMergeJoinSuite extends
GlutenClickHouseTPC
.set("spark.sql.shuffle.partitions", "5")
.set("spark.sql.autoBroadcastJoinThreshold", "-1")
.set("spark.memory.offHeap.size", "6g")
- .set("spark.gluten.sql.columnar.forceShuffledHashJoin", "false")
+ .set(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key, "false")
.setMaster("local[2]")
}
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetSuite.scala
index 6a13e8e664..846df06909 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpcds/GlutenClickHouseTPCDSParquetSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution.tpcds
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution._
import org.apache.spark.SparkConf
@@ -37,8 +38,8 @@ class GlutenClickHouseTPCDSParquetSuite extends
GlutenClickHouseTPCDSAbstractSui
.set("spark.sql.shuffle.partitions", "5")
.set("spark.sql.autoBroadcastJoinThreshold", "10MB")
.set("spark.memory.offHeap.size", "4g")
- .set("spark.gluten.sql.validation.logLevel", "ERROR")
- .set("spark.gluten.sql.validation.printStackOnFailure", "true")
+ .set(GlutenConfig.VALIDATION_LOG_LEVEL.key, "ERROR")
+ .set(GlutenConfig.VALIDATION_PRINT_FAILURE_STACK.key, "true")
.setCHConfig("enable_grace_aggregate_spill_test", "true")
}
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHColumnarShuffleParquetAQESuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHColumnarShuffleParquetAQESuite.scala
index 39f9413a01..367aa920db 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHColumnarShuffleParquetAQESuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHColumnarShuffleParquetAQESuite.scala
@@ -100,7 +100,7 @@ class GlutenClickHouseTPCHColumnarShuffleParquetAQESuite
extends ParquetTPCHSuit
}
test("Check the metrics values") {
- withSQLConf(("spark.gluten.sql.columnar.sort", "false")) {
+ withSQLConf((GlutenConfig.COLUMNAR_SORT_ENABLED.key, "false")) {
customCheck(1, native = false) {
df =>
assert(df.queryExecution.executedPlan.isInstanceOf[AdaptiveSparkPlanExec])
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHParquetAQESuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHParquetAQESuite.scala
index 25d2903ef0..6dcab37380 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHParquetAQESuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHParquetAQESuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution.tpch
+import org.apache.gluten.backendsapi.clickhouse.CHBackendSettings
import org.apache.gluten.execution._
import org.apache.spark.SparkConf
@@ -37,7 +38,7 @@ class GlutenClickHouseTPCHParquetAQESuite extends
ParquetTPCHSuite {
.set("spark.sql.autoBroadcastJoinThreshold", "10MB")
.set("spark.sql.adaptive.enabled", "true")
.setCHConfig("use_local_format", true)
- .set("spark.gluten.sql.columnar.backend.ch.shuffle.hash.algorithm",
"sparkMurmurHash3_32")
+ .set(CHBackendSettings.GLUTEN_CLICKHOUSE_SHUFFLE_HASH_ALGORITHM,
"sparkMurmurHash3_32")
}
final override val testCases: Seq[Int] = Seq(
@@ -149,7 +150,7 @@ class GlutenClickHouseTPCHParquetAQESuite extends
ParquetTPCHSuite {
test("GLUTEN-7971:Q21 Support using left side as the build table for the
left anti/semi join") {
withSQLConf(
("spark.sql.autoBroadcastJoinThreshold", "-1"),
- ("spark.gluten.sql.columnar.backend.ch.convert.left.anti_semi.to.right",
"true")) {
+ (CHBackendSettings.GLUTEN_CLICKHOUSE_CONVERT_LEFT_ANTI_SEMI_TO_RIGHT,
"true")) {
customCheck(21, compare = false) {
df =>
assert(df.queryExecution.executedPlan.isInstanceOf[AdaptiveSparkPlanExec])
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHSaltNullNativeParquetSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHSaltNullNativeParquetSuite.scala
index 778a591b8d..805afabc5c 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHSaltNullNativeParquetSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHSaltNullNativeParquetSuite.scala
@@ -17,6 +17,7 @@
package org.apache.gluten.execution.tpch
import org.apache.gluten.backendsapi.clickhouse.CHConfig
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution._
import org.apache.spark.SparkConf
@@ -32,7 +33,7 @@ class GlutenClickHouseTPCHSaltNullNativeParquetSuite
.set("spark.io.compression.codec", "snappy")
.set("spark.sql.shuffle.partitions", "5")
.set("spark.sql.autoBroadcastJoinThreshold", "10MB")
- .set("spark.gluten.supported.scala.udfs", "my_add")
+ .set(GlutenConfig.GLUTEN_SUPPORTED_SCALA_UDFS.key, "my_add")
.set(
CHConfig.runtimeSettings("input_format_parquet_use_native_reader_with_filter_push_down"),
"true")
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHSaltNullParquetSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHSaltNullParquetSuite.scala
index f88de605f7..341a10cb94 100644
---
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHSaltNullParquetSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/tpch/GlutenClickHouseTPCHSaltNullParquetSuite.scala
@@ -79,7 +79,7 @@ class GlutenClickHouseTPCHSaltNullParquetSuite
.set("spark.io.compression.codec", "snappy")
.set("spark.sql.shuffle.partitions", "5")
.set("spark.sql.autoBroadcastJoinThreshold", "10MB")
- .set("spark.gluten.supported.scala.udfs", "my_add")
+ .set(GlutenConfig.GLUTEN_SUPPORTED_SCALA_UDFS.key, "my_add")
}
final override val testCases: Seq[Int] = Seq(
@@ -1194,8 +1194,8 @@ class GlutenClickHouseTPCHSaltNullParquetSuite
test("test 'ColumnarToRowExec should not be used'") {
withSQLConf(
- "spark.gluten.sql.columnar.filescan" -> "false",
- "spark.gluten.sql.columnar.filter" -> "false"
+ GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key -> "false",
+ GlutenConfig.COLUMNAR_FILTER_ENABLED.key -> "false"
) {
runQueryAndCompare(
"select l_shipdate from lineitem where l_shipdate = '1996-05-07'",
diff --git
a/backends-clickhouse/src/test/scala/org/apache/gluten/s3/S3AuthSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/gluten/s3/S3AuthSuite.scala
index 78b96ce6a2..5235da2ac8 100644
--- a/backends-clickhouse/src/test/scala/org/apache/gluten/s3/S3AuthSuite.scala
+++ b/backends-clickhouse/src/test/scala/org/apache/gluten/s3/S3AuthSuite.scala
@@ -116,7 +116,7 @@ class S3AuthSuite extends AnyFunSuite {
.config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
.config("spark.memory.offHeap.enabled", "true")
.config("spark.memory.offHeap.size", "1g")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.hadoop.fs.s3a.endpoint", trustedOwnedEndpoint)
// The following two configs are provided to help hadoop-aws to pass.
// They're not required by native code (they don't have prefix
spark.hadoop so
@@ -147,7 +147,7 @@ class S3AuthSuite extends AnyFunSuite {
.config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
.config("spark.memory.offHeap.enabled", "true")
.config("spark.memory.offHeap.size", "1g")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.hadoop.fs.s3a.endpoint", trustingEndpoint)
.config(
"spark.hadoop.fs.s3a.aws.credentials.provider",
@@ -185,7 +185,7 @@ class S3AuthSuite extends AnyFunSuite {
.config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
.config("spark.memory.offHeap.enabled", "true")
.config("spark.memory.offHeap.size", "1g")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.hadoop.fs.s3a.endpoint", trustingEndpoint2)
.config(
"spark.hadoop.fs.s3a.aws.credentials.provider",
@@ -234,7 +234,7 @@ class S3AuthSuite extends AnyFunSuite {
.config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
.config("spark.memory.offHeap.enabled", "true")
.config("spark.memory.offHeap.size", "1g")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.hadoop.fs.s3a.endpoint", trustingEndpoint2)
.config(
"spark.hadoop.fs.s3a.aws.credentials.provider",
@@ -304,7 +304,7 @@ class S3AuthSuite extends AnyFunSuite {
.config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
.config("spark.memory.offHeap.enabled", "true")
.config("spark.memory.offHeap.size", "1g")
- .config("spark.gluten.sql.enable.native.validation", "false")
+ .config(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.config("spark.hadoop.fs.s3a.endpoint", cnEndpoint)
.config("spark.hadoop.fs.s3a.access.key", cnAK)
.config("spark.hadoop.fs.s3a.secret.key", cnSK)
diff --git
a/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHAggAndShuffleBenchmark.scala
b/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHAggAndShuffleBenchmark.scala
index e914fb74a9..3f7ac3eccc 100644
---
a/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHAggAndShuffleBenchmark.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHAggAndShuffleBenchmark.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution.benchmarks
+import org.apache.gluten.backendsapi.clickhouse.CHBackendSettings
import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.{FileSourceScanExecTransformer,
ProjectExecTransformer, WholeStageTransformer}
import org.apache.gluten.sql.shims.SparkShimLoader
@@ -67,7 +68,7 @@ object CHAggAndShuffleBenchmark extends SqlBasedBenchmark
with CHSqlBasedBenchma
override def getSparkSession: SparkSession = {
beforeAll()
val conf = getSparkConf
- .set("spark.gluten.sql.columnar.separate.scan.rdd.for.ch", "false")
+ .set(CHBackendSettings.GLUTEN_CLICKHOUSE_SEP_SCAN_RDD, "false")
.setIfMissing("spark.sql.shuffle.partitions", shufflePartition)
.setIfMissing("spark.shuffle.manager", "sort")
.setIfMissing("spark.io.compression.codec", "SNAPPY")
diff --git
a/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHSqlBasedBenchmark.scala
b/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHSqlBasedBenchmark.scala
index 76bcb552fc..34760329cc 100644
---
a/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHSqlBasedBenchmark.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHSqlBasedBenchmark.scala
@@ -16,6 +16,8 @@
*/
package org.apache.spark.sql.execution.benchmarks
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkConf
import org.apache.spark.sql.delta.DeltaLog
import org.apache.spark.sql.execution.benchmark.SqlBasedBenchmark
@@ -38,7 +40,7 @@ trait CHSqlBasedBenchmark extends SqlBasedBenchmark {
.set("spark.databricks.delta.snapshotPartitions", "1")
.set("spark.databricks.delta.properties.defaults.checkpointInterval",
"5")
.set("spark.databricks.delta.stalenessLimit", "3600000")
- .set("spark.gluten.sql.enable.native.validation", "false")
+ .set(GlutenConfig.NATIVE_VALIDATION_ENABLED.key, "false")
.set("spark.sql.adaptive.enabled", "false")
.setIfMissing("spark.memory.offHeap.size", offheapSize)
.setIfMissing("spark.sql.columnVector.offheap.enabled", "true")
diff --git
a/backends-clickhouse/src/test/scala/org/apache/spark/sql/extension/GlutenClickhouseCustomerExpressionTransformerSuite.scala
b/backends-clickhouse/src/test/scala/org/apache/spark/sql/extension/GlutenClickhouseCustomerExpressionTransformerSuite.scala
index cd8bf579fa..e7e2f3ac52 100644
---
a/backends-clickhouse/src/test/scala/org/apache/spark/sql/extension/GlutenClickhouseCustomerExpressionTransformerSuite.scala
+++
b/backends-clickhouse/src/test/scala/org/apache/spark/sql/extension/GlutenClickhouseCustomerExpressionTransformerSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.extension
+import org.apache.gluten.config.GlutenConfig
import
org.apache.gluten.execution.{GlutenClickHouseWholeStageTransformerSuite,
ProjectExecTransformer}
import org.apache.gluten.expression.ExpressionConverter
@@ -87,7 +88,7 @@ class GlutenClickhouseCustomerExpressionTransformerSuite
super.sparkConf
.set("spark.sql.adaptive.enabled", "false")
.set(
- "spark.gluten.sql.columnar.extended.expressions.transformer",
+ GlutenConfig.EXTENDED_EXPRESSION_TRAN_CONF.key,
"org.apache.spark.sql.extension.CustomerExpressionTransformer")
}
diff --git
a/backends-velox/src-iceberg/main/java/org/apache/gluten/TestConfUtil.java
b/backends-velox/src-iceberg/main/java/org/apache/gluten/TestConfUtil.java
index f037b9034b..1ad6faca83 100644
--- a/backends-velox/src-iceberg/main/java/org/apache/gluten/TestConfUtil.java
+++ b/backends-velox/src-iceberg/main/java/org/apache/gluten/TestConfUtil.java
@@ -23,9 +23,14 @@ import java.util.Map;
public class TestConfUtil {
public static Map<String, Object> GLUTEN_CONF =
ImmutableMap.of(
- "spark.plugins", "org.apache.gluten.GlutenPlugin",
- "spark.memory.offHeap.enabled", "true",
- "spark.memory.offHeap.size", "1024MB",
- "spark.ui.enabled", "false",
- "spark.gluten.ui.enabled", "false");
+ "spark.plugins",
+ "org.apache.gluten.GlutenPlugin",
+ "spark.memory.offHeap.enabled",
+ "true",
+ "spark.memory.offHeap.size",
+ "1024MB",
+ "spark.ui.enabled",
+ "false",
+ "spark.gluten.ui.enabled",
+ "false");
}
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/AutoAdjustStageResourceProfileSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/AutoAdjustStageResourceProfileSuite.scala
index a6b25e5d64..f094ce1408 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/AutoAdjustStageResourceProfileSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/AutoAdjustStageResourceProfileSuite.scala
@@ -39,7 +39,7 @@ class AutoAdjustStageResourceProfileSuite
.set("spark.memory.offHeap.size", "2g")
.set("spark.unsafe.exceptionOnMemoryLeak", "true")
.set("spark.sql.adaptive.enabled", "true")
- .set("spark.gluten.auto.adjustStageResource.enabled", "true")
+ .set(GlutenConfig.AUTO_ADJUST_STAGE_RESOURCE_PROFILE_ENABLED.key, "true")
}
override def beforeAll(): Unit = {
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/DynamicOffHeapSizingSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/DynamicOffHeapSizingSuite.scala
index 645deec16e..50485a3d57 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/DynamicOffHeapSizingSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/DynamicOffHeapSizingSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.execution
+import org.apache.gluten.config.GlutenCoreConfig
import org.apache.gluten.memory.memtarget.DynamicOffHeapSizingMemoryTarget
import org.apache.spark.SparkConf
@@ -35,8 +36,8 @@ class DynamicOffHeapSizingSuite extends
VeloxWholeStageTransformerSuite {
.set("spark.executor.memory", "2GB")
.set("spark.memory.offHeap.enabled", "false")
.set("spark.memory.offHeap.size", "0")
- .set("spark.gluten.memory.dynamic.offHeap.sizing.memory.fraction",
"0.95")
- .set("spark.gluten.memory.dynamic.offHeap.sizing.enabled", "true")
+ .set(GlutenCoreConfig.DYNAMIC_OFFHEAP_SIZING_MEMORY_FRACTION.key, "0.95")
+ .set(GlutenCoreConfig.DYNAMIC_OFFHEAP_SIZING_ENABLED.key, "true")
}
test("Dynamic off-heap sizing") {
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/FallbackSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/FallbackSuite.scala
index 542593abb1..8efbb18ef2 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/FallbackSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/FallbackSuite.scala
@@ -109,7 +109,7 @@ class FallbackSuite extends VeloxWholeStageTransformerSuite
with AdaptiveSparkPl
test("offload BroadcastExchange and fall back BHJ") {
withSQLConf(
- "spark.gluten.sql.columnar.broadcastJoin" -> "false"
+ GlutenConfig.COLUMNAR_BROADCAST_JOIN_ENABLED.key -> "false"
) {
runQueryAndCompare(
"""
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/MiscOperatorSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/MiscOperatorSuite.scala
index 333267e838..e14dd1fcb5 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/MiscOperatorSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/MiscOperatorSuite.scala
@@ -355,7 +355,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
test("window expression") {
Seq(("sort", 0), ("streaming", 1)).foreach {
case (windowType, localSortSize) =>
- withSQLConf("spark.gluten.sql.columnar.backend.velox.window.type" ->
windowType) {
+ withSQLConf(VeloxConfig.COLUMNAR_VELOX_WINDOW_TYPE.key -> windowType) {
runQueryAndCompare(
"select max(l_partkey) over" +
" (partition by l_suppkey order by l_commitdate" +
@@ -715,10 +715,9 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
test("combine small batches before shuffle") {
val minBatchSize = 15
withSQLConf(
- "spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput" ->
"true",
- "spark.gluten.sql.columnar.maxBatchSize" -> "2",
-
"spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput.minSize" ->
- s"$minBatchSize"
+ VeloxConfig.COLUMNAR_VELOX_RESIZE_BATCHES_SHUFFLE_INPUT.key -> "true",
+ GlutenConfig.COLUMNAR_MAX_BATCH_SIZE.key -> "2",
+ VeloxConfig.COLUMNAR_VELOX_RESIZE_BATCHES_SHUFFLE_INPUT_MIN_SIZE.key ->
s"$minBatchSize"
) {
val df = runQueryAndCompare(
"select l_orderkey, sum(l_partkey) as sum from lineitem " +
@@ -736,8 +735,8 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
}
withSQLConf(
- "spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput" ->
"true",
- "spark.gluten.sql.columnar.maxBatchSize" -> "2"
+ VeloxConfig.COLUMNAR_VELOX_RESIZE_BATCHES_SHUFFLE_INPUT.key -> "true",
+ GlutenConfig.COLUMNAR_MAX_BATCH_SIZE.key -> "2"
) {
val df = runQueryAndCompare(
"select l_orderkey, sum(l_partkey) as sum from lineitem " +
@@ -783,7 +782,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
test("Improve the local sort ensure requirements") {
withSQLConf(
"spark.sql.autoBroadcastJoinThreshold" -> "-1",
- "spark.gluten.sql.columnar.forceShuffledHashJoin" -> "false") {
+ GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "false") {
withTable("t1", "t2") {
sql("""
|create table t1 using parquet as
@@ -1281,7 +1280,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
}
test("Test sample op") {
- withSQLConf("spark.gluten.sql.columnarSampleEnabled" -> "true") {
+ withSQLConf(GlutenConfig.COLUMNAR_SAMPLE_ENABLED.key -> "true") {
withTable("t") {
sql("create table t (id int, b boolean) using parquet")
sql("insert into t values (1, true), (2, false), (3, null), (4, true),
(5, false)")
@@ -1303,7 +1302,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
|select cast(id as int) as c1, cast(id as string) c2 from
range(100) order by c1 desc;
|""".stripMargin)
- withSQLConf("spark.gluten.sql.columnar.forceShuffledHashJoin" -> "true")
{
+ withSQLConf(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key
-> "true") {
runQueryAndCompare(
"""
|select * from t1 cross join t2 on t1.c1 = t2.c1;
@@ -1323,7 +1322,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
}
}
- withSQLConf("spark.gluten.sql.columnar.forceShuffledHashJoin" ->
"false") {
+ withSQLConf(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key
-> "false") {
runQueryAndCompare(
"""
|select * from t1 cross join t2 on t1.c1 = t2.c1;
@@ -1333,7 +1332,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
}
}
- withSQLConf("spark.gluten.sql.columnar.forceShuffledHashJoin" ->
"false") {
+ withSQLConf(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key
-> "false") {
runQueryAndCompare(
"""
|select * from t1 left semi join t2 on t1.c1 = t2.c1 and t1.c1 >
50;
@@ -1381,7 +1380,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
|create table t2 using parquet as
|select cast(id as int) as c1, cast(id as string) c2 from
range(100) order by c1 desc;
|""".stripMargin)
- withSQLConf("spark.gluten.sql.columnar.forceShuffledHashJoin" ->
"false") {
+ withSQLConf(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key
-> "false") {
runQueryAndCompare(
"""
|select * from t1 inner join t2 on t1.c1 = t2.c1 and t1.c1 > 50;
@@ -1391,7 +1390,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
}
}
- withSQLConf("spark.gluten.sql.columnar.forceShuffledHashJoin" ->
"false") {
+ withSQLConf(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key
-> "false") {
runQueryAndCompare(
"""
|select * from t1 left join t2 on t1.c1 = t2.c1 and t1.c1 > 50;
@@ -1401,7 +1400,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
}
}
- withSQLConf("spark.gluten.sql.columnar.forceShuffledHashJoin" ->
"false") {
+ withSQLConf(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key
-> "false") {
runQueryAndCompare(
"""
|select * from t1 left semi join t2 on t1.c1 = t2.c1 and t1.c1 >
50;
@@ -1411,7 +1410,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
}
}
- withSQLConf("spark.gluten.sql.columnar.forceShuffledHashJoin" ->
"false") {
+ withSQLConf(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key
-> "false") {
runQueryAndCompare(
"""
|select * from t1 right join t2 on t1.c1 = t2.c1 and t1.c1 > 50;
@@ -1421,7 +1420,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
}
}
- withSQLConf("spark.gluten.sql.columnar.forceShuffledHashJoin" ->
"false") {
+ withSQLConf(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key
-> "false") {
runQueryAndCompare(
"""
|select * from t1 left anti join t2 on t1.c1 = t2.c1 and t1.c1 >
50;
@@ -2109,7 +2108,7 @@ class MiscOperatorSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
}
test("Blacklist expression can be handled by ColumnarPartialProject") {
- withSQLConf("spark.gluten.expression.blacklist" -> "regexp_replace") {
+ withSQLConf(GlutenConfig.EXPRESSION_BLACK_LIST.key -> "regexp_replace") {
runQueryAndCompare(
"SELECT c_custkey, c_name, regexp_replace(c_comment, '\\w',
'something') FROM customer") {
df =>
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxAggregateFunctionsSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxAggregateFunctionsSuite.scala
index c1700d487b..b5ebcf1785 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxAggregateFunctionsSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxAggregateFunctionsSuite.scala
@@ -49,7 +49,7 @@ abstract class VeloxAggregateFunctionsSuite extends
VeloxWholeStageTransformerSu
.set("spark.unsafe.exceptionOnMemoryLeak", "true")
.set("spark.sql.autoBroadcastJoinThreshold", "-1")
.set("spark.sql.sources.useV1SourceList", "avro")
- .set("spark.gluten.sql.mergeTwoPhasesAggregate.enabled", "false")
+ .set(GlutenConfig.MERGE_TWO_PHASES_ENABLED.key, "false")
}
test("count") {
@@ -1276,10 +1276,10 @@ class VeloxAggregateFunctionsFlushSuite extends
VeloxAggregateFunctionsSuite {
test("flushable aggregate rule - double sum when floatingPointMode is
strict") {
withSQLConf(
- "spark.gluten.sql.columnar.backend.velox.maxPartialAggregationMemory" ->
"100",
- "spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput" ->
"false",
- "spark.gluten.sql.columnar.maxBatchSize" -> "2",
- "spark.gluten.sql.columnar.backend.velox.floatingPointMode" -> "strict"
+ VeloxConfig.MAX_PARTIAL_AGGREGATION_MEMORY.key -> "100",
+ VeloxConfig.COLUMNAR_VELOX_RESIZE_BATCHES_SHUFFLE_INPUT.key -> "false",
+ GlutenConfig.COLUMNAR_MAX_BATCH_SIZE.key -> "2",
+ VeloxConfig.FLOATING_POINT_MODE.key -> "strict"
) {
withTempView("t1") {
import testImplicits._
@@ -1302,7 +1302,7 @@ class VeloxAggregateFunctionsFlushSuite extends
VeloxAggregateFunctionsSuite {
test("flushable aggregate rule - double sum when floatingPointMode is
loose") {
withSQLConf(
- "spark.gluten.sql.columnar.backend.velox.floatingPointMode" -> "loose"
+ VeloxConfig.FLOATING_POINT_MODE.key -> "loose"
) {
withTempView("t1") {
import testImplicits._
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxHashJoinSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxHashJoinSuite.scala
index 5f06bd1c74..f242b24f34 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxHashJoinSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxHashJoinSuite.scala
@@ -42,7 +42,8 @@ class VeloxHashJoinSuite extends
VeloxWholeStageTransformerSuite {
withSQLConf(
("spark.sql.autoBroadcastJoinThreshold", "-1"),
("spark.sql.adaptive.enabled", "false"),
- ("spark.gluten.sql.columnar.forceShuffledHashJoin", "true")) {
+ (GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key, "true")
+ ) {
createTPCHNotNullTables()
val df = spark.sql("""select l_partkey from
| lineitem join part join partsupp
@@ -77,7 +78,7 @@ class VeloxHashJoinSuite extends
VeloxWholeStageTransformerSuite {
withSQLConf(
("spark.sql.autoBroadcastJoinThreshold", "-1"),
("spark.sql.adaptive.enabled", "false"),
- ("spark.gluten.sql.columnar.forceShuffledHashJoin", "true"),
+ (GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key, "true"),
("spark.sql.sources.useV1SourceList", "avro")
) {
createTPCHNotNullTables()
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxMetricsSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxMetricsSuite.scala
index fe4b0c4736..954a1eacc5 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxMetricsSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxMetricsSuite.scala
@@ -169,7 +169,7 @@ class VeloxMetricsSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
}
test("Metrics of noop filter's children") {
- withSQLConf("spark.gluten.ras.enabled" -> "true") {
+ withSQLConf(GlutenConfig.RAS_ENABLED.key -> "true") {
runQueryAndCompare("SELECT c1, c2 FROM metrics_t1 where c1 < 50") {
df =>
val scan = find(df.queryExecution.executedPlan) {
@@ -186,7 +186,7 @@ class VeloxMetricsSuite extends
VeloxWholeStageTransformerSuite with AdaptiveSpa
test("Write metrics") {
if (SparkShimLoader.getSparkVersion.startsWith("3.4")) {
- withSQLConf(("spark.gluten.sql.native.writer.enabled", "true")) {
+ withSQLConf((GlutenConfig.NATIVE_WRITER_ENABLED.key, "true")) {
runQueryAndCompare(
"Insert into table metrics_t1 values(1 , 2)"
) {
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxOrcDataTypeValidationSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxOrcDataTypeValidationSuite.scala
index e47524ce92..8847d1dd98 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxOrcDataTypeValidationSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxOrcDataTypeValidationSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.gluten.execution
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkConf
import java.io.File
@@ -288,7 +290,7 @@ class VeloxOrcDataTypeValidationSuite extends
VeloxWholeStageTransformerSuite {
// Validation: ShuffledHashJoin.
withSQLConf(
- "spark.gluten.sql.columnar.forceShuffledHashJoin" -> "true",
+ GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "true",
"spark.sql.autoBroadcastJoinThreshold" -> "-1") {
runQueryAndCompare(
"select type1.date from type1," +
@@ -299,7 +301,7 @@ class VeloxOrcDataTypeValidationSuite extends
VeloxWholeStageTransformerSuite {
// Validation: SortMergeJoin.
withSQLConf("spark.sql.autoBroadcastJoinThreshold" -> "-1") {
- withSQLConf("spark.gluten.sql.columnar.forceShuffledHashJoin" ->
"false") {
+ withSQLConf(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key
-> "false") {
runQueryAndCompare(
"select type1.date from type1," +
" type2 where type1.date = type2.date") {
@@ -466,7 +468,7 @@ class VeloxOrcDataTypeValidationSuite extends
VeloxWholeStageTransformerSuite {
}
ignore("Velox Parquet Write") {
- withSQLConf(("spark.gluten.sql.native.writer.enabled", "true")) {
+ withSQLConf((GlutenConfig.NATIVE_WRITER_ENABLED.key, "true")) {
withTempDir {
dir =>
val write_path = dir.toURI.getPath
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxParquetDataTypeValidationSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxParquetDataTypeValidationSuite.scala
index f1279229a9..c8ecb366ec 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxParquetDataTypeValidationSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxParquetDataTypeValidationSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.gluten.execution
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkConf
import java.io.File
@@ -287,7 +289,7 @@ class VeloxParquetDataTypeValidationSuite extends
VeloxWholeStageTransformerSuit
// Validation: ShuffledHashJoin.
withSQLConf(
- "spark.gluten.sql.columnar.forceShuffledHashJoin" -> "true",
+ GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key -> "true",
"spark.sql.autoBroadcastJoinThreshold" -> "-1") {
runQueryAndCompare(
"select type1.date from type1," +
@@ -298,7 +300,7 @@ class VeloxParquetDataTypeValidationSuite extends
VeloxWholeStageTransformerSuit
// Validation: SortMergeJoin.
withSQLConf("spark.sql.autoBroadcastJoinThreshold" -> "-1") {
- withSQLConf("spark.gluten.sql.columnar.forceShuffledHashJoin" ->
"false") {
+ withSQLConf(GlutenConfig.COLUMNAR_FORCE_SHUFFLED_HASH_JOIN_ENABLED.key
-> "false") {
runQueryAndCompare(
"select type1.date from type1," +
" type2 where type1.date = type2.date") {
@@ -464,7 +466,7 @@ class VeloxParquetDataTypeValidationSuite extends
VeloxWholeStageTransformerSuit
}
test("Velox Parquet Write") {
- withSQLConf(("spark.gluten.sql.native.writer.enabled", "true")) {
+ withSQLConf((GlutenConfig.NATIVE_WRITER_ENABLED.key, "true")) {
withTempDir {
dir =>
val write_path = dir.toURI.getPath
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxTPCDSSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxTPCDSSuite.scala
index 72cd81a019..ab172c075c 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxTPCDSSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxTPCDSSuite.scala
@@ -16,6 +16,8 @@
*/
package org.apache.gluten.execution
+import org.apache.gluten.config.GlutenConfig
+
import org.apache.spark.SparkConf
import org.apache.spark.sql.DataFrame
@@ -55,8 +57,8 @@ class VeloxTPCDSSuite extends VeloxWholeStageTransformerSuite
{
.set("spark.driver.maxResultSize", "4g")
.set("spark.sql.sources.useV1SourceList", "avro")
.set("spark.sql.adaptive.enabled", "true")
- .set("spark.gluten.sql.columnar.maxBatchSize", "4096")
- .set("spark.gluten.shuffleWriter.bufferSize", "4096")
+ .set(GlutenConfig.COLUMNAR_MAX_BATCH_SIZE.key, "4096")
+ .set(GlutenConfig.SHUFFLE_WRITER_BUFFER_SIZE.key, "4096")
.set("spark.executor.memory", "4g")
.set("spark.executor.instances", "16")
.set("spark.executor.cores", "8")
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxTPCHSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxTPCHSuite.scala
index 2d7645bedf..9fd24e6cd1 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxTPCHSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/execution/VeloxTPCHSuite.scala
@@ -17,6 +17,7 @@
package org.apache.gluten.execution
import org.apache.gluten.config.GlutenConfig
+import org.apache.gluten.config.GlutenCoreConfig
import org.apache.gluten.config.VeloxConfig
import org.apache.spark.SparkConf
@@ -44,7 +45,7 @@ abstract class VeloxTPCHTableSupport extends
VeloxWholeStageTransformerSuite {
.set("spark.shuffle.manager",
"org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.sql.files.maxPartitionBytes", "1g")
.set("spark.sql.shuffle.partitions", "1")
- .set("spark.gluten.sql.columnar.backend.velox.memInitCapacity", "1m")
+ .set(VeloxConfig.COLUMNAR_VELOX_MEM_INIT_CAPACITY.key, "1m")
.set("spark.memory.offHeap.size", "2g")
.set("spark.unsafe.exceptionOnMemoryLeak", "true")
.set("spark.sql.autoBroadcastJoinThreshold", "-1")
@@ -259,7 +260,8 @@ class VeloxTPCHDistinctSpillSuite extends
VeloxTPCHTableSupport {
override protected def sparkConf: SparkConf = {
super.sparkConf
.set("spark.memory.offHeap.size", "50m")
- .set("spark.gluten.memory.overAcquiredMemoryRatio", "0.9") // to trigger
distinct spill early
+ // to trigger distinct spill early
+ .set(GlutenCoreConfig.COLUMNAR_MEMORY_OVER_ACQUIRED_RATIO.key, "0.9")
.set(GlutenConfig.GLUTEN_COLUMNAR_TO_ROW_MEM_THRESHOLD.key, "8k")
}
@@ -376,7 +378,7 @@ class VeloxTPCHV1RasSuite extends VeloxTPCHSuite {
super.sparkConf
.set("spark.sql.sources.useV1SourceList", "parquet")
.set("spark.sql.autoBroadcastJoinThreshold", "-1")
- .set("spark.gluten.ras.enabled", "true")
+ .set(GlutenConfig.RAS_ENABLED.key, "true")
}
}
@@ -387,7 +389,7 @@ class VeloxTPCHV1BhjRasSuite extends VeloxTPCHSuite {
super.sparkConf
.set("spark.sql.sources.useV1SourceList", "parquet")
.set("spark.sql.autoBroadcastJoinThreshold", "30M")
- .set("spark.gluten.ras.enabled", "true")
+ .set(GlutenConfig.RAS_ENABLED.key, "true")
}
}
@@ -429,8 +431,8 @@ class VeloxTPCHV1GlutenBhjVanillaBeSuite extends
VeloxTPCHSuite {
super.sparkConf
.set("spark.sql.sources.useV1SourceList", "parquet")
.set("spark.sql.autoBroadcastJoinThreshold", "30M")
- .set("spark.gluten.sql.columnar.broadcastJoin", "true")
- .set("spark.gluten.sql.columnar.broadcastExchange", "false")
+ .set(GlutenConfig.COLUMNAR_BROADCAST_JOIN_ENABLED.key, "true")
+ .set(GlutenConfig.COLUMNAR_BROADCAST_EXCHANGE_ENABLED.key, "false")
}
}
@@ -441,7 +443,7 @@ class VeloxTPCHV1VanillaBhjGlutenBeSuite extends
VeloxTPCHSuite {
super.sparkConf
.set("spark.sql.sources.useV1SourceList", "parquet")
.set("spark.sql.autoBroadcastJoinThreshold", "30M")
- .set("spark.gluten.sql.columnar.broadcastJoin", "false")
- .set("spark.gluten.sql.columnar.broadcastExchange", "true")
+ .set(GlutenConfig.COLUMNAR_BROADCAST_JOIN_ENABLED.key, "false")
+ .set(GlutenConfig.COLUMNAR_BROADCAST_EXCHANGE_ENABLED.key, "true")
}
}
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/expression/UDFPartialProjectSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/expression/UDFPartialProjectSuite.scala
index 83eb67acae..5152cbc457 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/expression/UDFPartialProjectSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/expression/UDFPartialProjectSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.expression
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.{ColumnarPartialProjectExec,
WholeStageTransformerSuite}
import org.apache.spark.SparkConf
@@ -30,14 +31,14 @@ case class MyStruct(a: Long, b: Array[Long])
class UDFPartialProjectSuiteRasOff extends UDFPartialProjectSuite {
override protected def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.ras.enabled", "false")
+ .set(GlutenConfig.RAS_ENABLED.key, "false")
}
}
class UDFPartialProjectSuiteRasOn extends UDFPartialProjectSuite {
override protected def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.ras.enabled", "true")
+ .set(GlutenConfig.RAS_ENABLED.key, "true")
}
}
@@ -223,8 +224,8 @@ abstract class UDFPartialProjectSuite extends
WholeStageTransformerSuite {
Seq("false", "true").foreach {
enableNativeScanAndWriter =>
withSQLConf(
- "spark.gluten.sql.native.writer.enabled" ->
enableNativeScanAndWriter,
- "spark.gluten.sql.columnar.batchscan" -> enableNativeScanAndWriter
+ GlutenConfig.NATIVE_WRITER_ENABLED.key -> enableNativeScanAndWriter,
+ GlutenConfig.COLUMNAR_BATCHSCAN_ENABLED.key ->
enableNativeScanAndWriter
) {
withTable("t1") {
spark.sql("""
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/expression/VeloxUdfSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/expression/VeloxUdfSuite.scala
index 086551ff43..8a260493f2 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/expression/VeloxUdfSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/expression/VeloxUdfSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.expression
+import org.apache.gluten.backendsapi.velox.VeloxBackendSettings
import org.apache.gluten.execution.ProjectExecTransformer
import org.apache.gluten.tags.{SkipTest, UDFTest}
@@ -240,7 +241,7 @@ class VeloxUdfSuiteLocal extends VeloxUdfSuite {
override protected def sparkConf: SparkConf = {
super.sparkConf
.set("spark.files", udfLibPath)
- .set("spark.gluten.sql.columnar.backend.velox.udfLibraryPaths",
udfLibRelativePath)
+ .set(VeloxBackendSettings.GLUTEN_VELOX_UDF_LIB_PATHS, udfLibRelativePath)
.set("spark.shuffle.manager",
"org.apache.spark.shuffle.sort.ColumnarShuffleManager")
}
}
@@ -277,8 +278,8 @@ class VeloxUdfSuiteCluster extends VeloxUdfSuite {
override protected def sparkConf: SparkConf = {
super.sparkConf
.set("spark.files", udfLibPath)
- .set("spark.gluten.sql.columnar.backend.velox.driver.udfLibraryPaths",
driverUdfLibPath)
- .set("spark.gluten.sql.columnar.backend.velox.udfLibraryPaths",
udfLibRelativePath)
+ .set(VeloxBackendSettings.GLUTEN_VELOX_DRIVER_UDF_LIB_PATHS,
driverUdfLibPath)
+ .set(VeloxBackendSettings.GLUTEN_VELOX_UDF_LIB_PATHS, udfLibRelativePath)
.set("spark.driver.extraClassPath", glutenJar)
.set("spark.executor.extraClassPath", glutenJar)
}
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/functions/DateFunctionsValidateSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/functions/DateFunctionsValidateSuite.scala
index e85ef46944..05475da481 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/functions/DateFunctionsValidateSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/functions/DateFunctionsValidateSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.functions
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.ProjectExecTransformer
import org.apache.spark.SparkConf
@@ -27,14 +28,14 @@ import java.sql.Timestamp
class DateFunctionsValidateSuiteRasOff extends DateFunctionsValidateSuite {
override protected def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.ras.enabled", "false")
+ .set(GlutenConfig.RAS_ENABLED.key, "false")
}
}
class DateFunctionsValidateSuiteRasOn extends DateFunctionsValidateSuite {
override protected def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.ras.enabled", "true")
+ .set(GlutenConfig.RAS_ENABLED.key, "true")
}
}
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/functions/MathFunctionsValidateSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/functions/MathFunctionsValidateSuite.scala
index d980c2708e..ba7d98991e 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/functions/MathFunctionsValidateSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/functions/MathFunctionsValidateSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.functions
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.{BatchScanExecTransformer,
ProjectExecTransformer}
import org.apache.spark.SparkConf
@@ -24,14 +25,14 @@ import org.apache.spark.sql.Row
class MathFunctionsValidateSuiteRasOff extends MathFunctionsValidateSuite {
override protected def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.ras.enabled", "false")
+ .set(GlutenConfig.RAS_ENABLED.key, "false")
}
}
class MathFunctionsValidateSuiteRasOn extends MathFunctionsValidateSuite {
override protected def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.ras.enabled", "true")
+ .set(GlutenConfig.RAS_ENABLED.key, "true")
}
}
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/functions/ScalarFunctionsValidateSuite.scala
b/backends-velox/src/test/scala/org/apache/gluten/functions/ScalarFunctionsValidateSuite.scala
index 0ce16d9bde..5fbbabe71a 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/functions/ScalarFunctionsValidateSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/functions/ScalarFunctionsValidateSuite.scala
@@ -26,14 +26,14 @@ import org.apache.spark.sql.execution.ProjectExec
class ScalarFunctionsValidateSuiteRasOff extends ScalarFunctionsValidateSuite {
override protected def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.ras.enabled", "false")
+ .set(GlutenConfig.RAS_ENABLED.key, "false")
}
}
class ScalarFunctionsValidateSuiteRasOn extends ScalarFunctionsValidateSuite {
override protected def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.ras.enabled", "true")
+ .set(GlutenConfig.RAS_ENABLED.key, "true")
}
}
diff --git
a/backends-velox/src/test/scala/org/apache/gluten/fuzzer/RowToColumnarFuzzer.scala
b/backends-velox/src/test/scala/org/apache/gluten/fuzzer/RowToColumnarFuzzer.scala
index 20efa3ffbc..863613b4c6 100644
---
a/backends-velox/src/test/scala/org/apache/gluten/fuzzer/RowToColumnarFuzzer.scala
+++
b/backends-velox/src/test/scala/org/apache/gluten/fuzzer/RowToColumnarFuzzer.scala
@@ -16,6 +16,7 @@
*/
package org.apache.gluten.fuzzer
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.RowToVeloxColumnarExec
import org.apache.gluten.fuzzer.FuzzerResult.Successful
import org.apache.gluten.tags.{FuzzerTest, SkipTest}
@@ -29,7 +30,7 @@ class RowToColumnarFuzzer extends FuzzerBase {
override protected def sparkConf: SparkConf = {
super.sparkConf
- .set("spark.gluten.sql.columnar.filescan", "false")
+ .set(GlutenConfig.COLUMNAR_FILESCAN_ENABLED.key, "false")
}
private def checkOperators(df: DataFrame): Unit = {
diff --git
a/backends-velox/src/test/scala/org/apache/spark/sql/execution/GlutenHiveUDFSuite.scala
b/backends-velox/src/test/scala/org/apache/spark/sql/execution/GlutenHiveUDFSuite.scala
index 1e628343e7..e6505decb6 100644
---
a/backends-velox/src/test/scala/org/apache/spark/sql/execution/GlutenHiveUDFSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/spark/sql/execution/GlutenHiveUDFSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.ColumnarPartialProjectExec
import org.apache.gluten.expression.UDFMappings
import org.apache.gluten.udf.CustomerUDF
@@ -89,7 +90,7 @@ class GlutenHiveUDFSuite extends GlutenQueryTest with
SQLTestUtils {
.set("spark.default.parallelism", "1")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "1024MB")
- .set("spark.gluten.sql.native.writer.enabled", "true")
+ .set(GlutenConfig.NATIVE_WRITER_ENABLED.key, "true")
}
private def withTempFunction(funcName: String)(f: => Unit): Unit = {
diff --git
a/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala
b/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala
index 2201457a72..73040069cf 100644
---
a/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteForHiveSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.VeloxColumnarToCarrierRowExec
import org.apache.spark.SparkConf
@@ -89,7 +90,7 @@ class VeloxParquetWriteForHiveSuite
.set("spark.default.parallelism", "1")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "1024MB")
- .set("spark.gluten.sql.native.writer.enabled", "true")
+ .set(GlutenConfig.NATIVE_WRITER_ENABLED.key, "true")
}
private def checkNativeWrite(sqlStr: String, checkNative: Boolean): Unit = {
@@ -277,7 +278,7 @@ class VeloxParquetWriteForHiveSuite
test("native writer should respect table properties") {
Seq(true, false).foreach {
enableNativeWrite =>
- withSQLConf("spark.gluten.sql.native.writer.enabled" ->
enableNativeWrite.toString) {
+ withSQLConf(GlutenConfig.NATIVE_WRITER_ENABLED.key ->
enableNativeWrite.toString) {
withTable("t") {
withSQLConf(
"spark.sql.hive.convertMetastoreParquet" -> "false",
@@ -412,7 +413,7 @@ class VeloxParquetWriteForHiveSuite
"3.3") {
Seq(false, true).foreach {
enableNativeWrite =>
- withSQLConf("spark.gluten.sql.native.writer.enabled" ->
enableNativeWrite.toString) {
+ withSQLConf(GlutenConfig.NATIVE_WRITER_ENABLED.key ->
enableNativeWrite.toString) {
withTable("t") {
withSQLConf(
"spark.sql.hive.convertMetastoreParquet" -> "false",
diff --git
a/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteSuite.scala
b/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteSuite.scala
index d19d279fbb..3b3129090b 100644
---
a/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteSuite.scala
+++
b/backends-velox/src/test/scala/org/apache/spark/sql/execution/VeloxParquetWriteSuite.scala
@@ -16,6 +16,7 @@
*/
package org.apache.spark.sql.execution
+import org.apache.gluten.config.GlutenConfig
import org.apache.gluten.execution.VeloxWholeStageTransformerSuite
import org.apache.gluten.test.FallbackUtil
@@ -53,7 +54,7 @@ class VeloxParquetWriteSuite extends
VeloxWholeStageTransformerSuite {
}
override protected def sparkConf: SparkConf = {
- super.sparkConf.set("spark.gluten.sql.native.writer.enabled", "true")
+ super.sparkConf.set(GlutenConfig.NATIVE_WRITER_ENABLED.key, "true")
}
test("test Array(Struct) fallback") {
diff --git
a/backends-velox/src/test/scala/org/apache/spark/sql/execution/benchmark/VeloxRasBenchmark.scala
b/backends-velox/src/test/scala/org/apache/spark/sql/execution/benchmark/VeloxRasBenchmark.scala
index 2c0384a98d..9a9ae6407a 100644
---
a/backends-velox/src/test/scala/org/apache/spark/sql/execution/benchmark/VeloxRasBenchmark.scala
+++
b/backends-velox/src/test/scala/org/apache/spark/sql/execution/benchmark/VeloxRasBenchmark.scala
@@ -64,7 +64,7 @@ object VeloxRasBenchmark extends SqlBasedBenchmark {
.config("spark.plugins", "org.apache.gluten.GlutenPlugin")
.config("spark.shuffle.manager",
"org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.config("spark.ui.enabled", "false")
- .config("spark.gluten.ui.enabled", "false")
+ .config(GlutenConfig.GLUTEN_UI_ENABLED.key, "false")
.config("spark.memory.offHeap.enabled", "true")
.config("spark.memory.offHeap.size", "2g")
.config("spark.sql.adaptive.enabled", "false")
diff --git
a/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala
b/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala
index 793a180392..15d6c582ae 100644
---
a/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala
+++
b/gluten-substrait/src/main/scala/org/apache/gluten/config/GlutenConfig.scala
@@ -292,7 +292,7 @@ class GlutenConfig(conf: SQLConf) extends
GlutenCoreConfig(conf) {
}
def printStackOnValidationFailure: Boolean =
- getConf(VALIDATION_PRINT_FAILURE_STACK_)
+ getConf(VALIDATION_PRINT_FAILURE_STACK)
def validationFailFast: Boolean = getConf(VALIDATION_FAIL_FAST)
@@ -1199,7 +1199,7 @@ object GlutenConfig {
"Valid values are 'trace', 'debug', 'info', 'warn' and 'error'.")
.createWithDefault("WARN")
- val VALIDATION_PRINT_FAILURE_STACK_ =
+ val VALIDATION_PRINT_FAILURE_STACK =
buildConf("spark.gluten.sql.validation.printStackOnFailure")
.internal()
.booleanConf
diff --git
a/gluten-substrait/src/test/scala/org/apache/gluten/execution/WholeStageTransformerSuite.scala
b/gluten-substrait/src/test/scala/org/apache/gluten/execution/WholeStageTransformerSuite.scala
index a1dc1fe14a..af25b13555 100644
---
a/gluten-substrait/src/test/scala/org/apache/gluten/execution/WholeStageTransformerSuite.scala
+++
b/gluten-substrait/src/test/scala/org/apache/gluten/execution/WholeStageTransformerSuite.scala
@@ -96,7 +96,7 @@ abstract class WholeStageTransformerSuite
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "1024MB")
.set("spark.ui.enabled", "false")
- .set("spark.gluten.ui.enabled", "false")
+ .set(GlutenConfig.GLUTEN_UI_ENABLED.key, "false")
}
protected def checkFallbackOperators(df: DataFrame, num: Int): Unit = {
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]