This is an automated email from the ASF dual-hosted git repository.

hongze pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/incubator-gluten.git


The following commit(s) were added to refs/heads/main by this push:
     new df66e93a72 [GLUTEN-8479][CORE][Part-2] All configurations should be 
defined through ConfigEntry
df66e93a72 is described below

commit df66e93a72b1fb270cf7024545810110f99ce97e
Author: Kaifei Yi <[email protected]>
AuthorDate: Tue Jan 21 10:19:25 2025 +0800

    [GLUTEN-8479][CORE][Part-2] All configurations should be defined through 
ConfigEntry
---
 .../execution/iceberg/ClickHouseIcebergSuite.scala |  20 +-
 .../backendsapi/clickhouse/CHListenerApi.scala     |   7 +-
 .../apache/spark/rpc/GlutenDriverEndpoint.scala    |   4 +-
 .../test/scala/org/apache/gluten/RunTPCHTest.scala |   2 +-
 .../org/apache/gluten/affinity/CHUTAffinity.scala  |   4 +-
 .../execution/GlutenClickHouseJoinSuite.scala      |   2 +-
 .../GlutenClickHouseNativeExceptionSuite.scala     |   2 +-
 .../execution/GlutenClickHouseNativeLibSuite.scala |   6 +-
 .../GlutenClickHouseSyntheticDataSuite.scala       |   2 +-
 .../GlutenClickHouseTPCDSAbstractSuite.scala       |   2 +-
 .../GlutenClickHouseTPCHAbstractSuite.scala        |   2 +-
 ...lutenClickHouseWholeStageTransformerSuite.scala |   2 +-
 .../execution/GlutenFunctionValidateSuite.scala    |   2 +-
 .../GlutenClickhouseFunctionSuite.scala            |   2 +-
 .../hive/GlutenClickHouseHiveTableSuite.scala      |   2 +-
 .../GlutenClickHouseNativeWriteTableSuite.scala    |   2 +-
 .../scala/org/apache/gluten/s3/S3AuthSuite.scala   |  10 +-
 .../execution/benchmarks/CHSqlBasedBenchmark.scala |   5 +-
 .../VeloxCelebornColumnarBatchSerializer.scala     |   2 +-
 .../VeloxCelebornColumnarShuffleWriter.scala       |   3 +-
 .../writer/VeloxUniffleColumnarShuffleWriter.java  |   5 +-
 .../backendsapi/velox/VeloxListenerApi.scala       |  10 +-
 .../backendsapi/velox/VeloxSparkPlanExecApi.scala  |   3 +-
 .../apache/gluten/utils/SharedLibraryLoader.scala  |  12 +-
 .../vectorized/ColumnarBatchSerializer.scala       |   3 +-
 .../spark/shuffle/ColumnarShuffleWriter.scala      |   3 +-
 .../gluten/execution/MiscOperatorSuite.scala       |   4 +-
 .../shuffle/CelebornColumnarShuffleWriter.scala    |   3 +-
 .../memory/memtarget/ThrowOnOomMemoryTarget.java   |  18 +-
 .../scala/org/apache/gluten/GlutenPlugin.scala     |  83 +++---
 .../gluten/extension/GlutenSessionExtensions.scala |   2 +-
 .../gluten/softaffinity/SoftAffinityManager.scala  |  27 +-
 .../org/apache/gluten/execution/IcebergSuite.scala |  16 +-
 .../org/apache/gluten/expression/UDFMappings.scala |   6 +-
 .../execution/ColumnarShuffleExchangeExec.scala    |   6 +-
 .../spark/softaffinity/SoftAffinitySuite.scala     |   6 +-
 .../SoftAffinityWithRDDInfoSuite.scala             |   8 +-
 .../spark/sql/DummyFilterColmnarHelper.scala       |   2 +-
 .../apache/spark/sql/GlutenSQLTestsBaseTrait.scala |   2 +-
 .../org/apache/spark/sql/GlutenTestsTrait.scala    |   2 +-
 .../apache/spark/sql/GlutenSQLQueryTestSuite.scala |   2 +-
 .../benchmarks/ParquetReadBenchmark.scala          |   4 +-
 .../execution/joins/GlutenBroadcastJoinSuite.scala |   2 +-
 .../hive/execution/GlutenHiveSQLQueryCHSuite.scala |   2 +-
 .../sql/statistics/SparkFunctionStatistics.scala   |   2 +-
 .../apache/spark/sql/GlutenSQLQueryTestSuite.scala |   2 +-
 .../benchmarks/ParquetReadBenchmark.scala          |   4 +-
 .../execution/joins/GlutenBroadcastJoinSuite.scala |   2 +-
 .../hive/execution/GlutenHiveSQLQueryCHSuite.scala |   2 +-
 .../sql/statistics/SparkFunctionStatistics.scala   |   2 +-
 .../apache/spark/sql/GlutenSQLQueryTestSuite.scala |   2 +-
 .../benchmarks/ParquetReadBenchmark.scala          |   4 +-
 .../execution/joins/GlutenBroadcastJoinSuite.scala |   2 +-
 .../hive/execution/GlutenHiveSQLQueryCHSuite.scala |   2 +-
 .../sql/statistics/SparkFunctionStatistics.scala   |   2 +-
 .../apache/spark/sql/GlutenSQLQueryTestSuite.scala |   2 +-
 .../benchmarks/ParquetReadBenchmark.scala          |   4 +-
 .../execution/joins/GlutenBroadcastJoinSuite.scala |   2 +-
 .../hive/execution/GlutenHiveSQLQueryCHSuite.scala |   2 +-
 .../sql/statistics/SparkFunctionStatistics.scala   |   2 +-
 .../MergeTwoPhasesHashBaseAggregateSuite.scala     |   2 +-
 .../expressions/GlutenExpressionMappingSuite.scala |   2 +-
 .../GlutenExtensionRewriteRuleSuite.scala          |   2 +-
 .../org/apache/gluten/sql/SQLQuerySuite.scala      |   2 +-
 .../sql/GlutenExpressionDataTypesValidation.scala  |   2 +-
 .../datasources/GlutenNoopWriterRuleSuite.scala    |   2 +-
 .../org/apache/gluten/config/GlutenConfig.scala    | 312 ++++++++++-----------
 .../org/apache/gluten/config/ReservedKeys.scala    |  35 ++-
 68 files changed, 358 insertions(+), 353 deletions(-)

diff --git 
a/backends-clickhouse/src-iceberg/test/scala/org/apache/gluten/execution/iceberg/ClickHouseIcebergSuite.scala
 
b/backends-clickhouse/src-iceberg/test/scala/org/apache/gluten/execution/iceberg/ClickHouseIcebergSuite.scala
index 43163eaf1e..38c1e606cb 100644
--- 
a/backends-clickhouse/src-iceberg/test/scala/org/apache/gluten/execution/iceberg/ClickHouseIcebergSuite.scala
+++ 
b/backends-clickhouse/src-iceberg/test/scala/org/apache/gluten/execution/iceberg/ClickHouseIcebergSuite.scala
@@ -62,7 +62,7 @@ class ClickHouseIcebergSuite extends 
GlutenClickHouseWholeStageTransformerSuite
     val rightTable = "p_int_tb"
     withTable(leftTable, rightTable) {
       // Partition key of string type.
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         // Gluten does not support write iceberg table.
         spark.sql(
           s"""
@@ -84,7 +84,7 @@ class ClickHouseIcebergSuite extends 
GlutenClickHouseWholeStageTransformerSuite
 
       // Partition key of integer type.
       withSQLConf(
-        GlutenConfig.GLUTEN_ENABLED_KEY -> "false"
+        GlutenConfig.GLUTEN_ENABLED.key -> "false"
       ) {
         // Gluten does not support write iceberg table.
         spark.sql(
@@ -145,7 +145,7 @@ class ClickHouseIcebergSuite extends 
GlutenClickHouseWholeStageTransformerSuite
     val rightTable = "p_int_tb"
     withTable(leftTable, rightTable) {
       // Partition key of string type.
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         // Gluten does not support write iceberg table.
         spark.sql(
           s"""
@@ -167,7 +167,7 @@ class ClickHouseIcebergSuite extends 
GlutenClickHouseWholeStageTransformerSuite
 
       // Partition key of integer type.
       withSQLConf(
-        GlutenConfig.GLUTEN_ENABLED_KEY -> "false"
+        GlutenConfig.GLUTEN_ENABLED.key -> "false"
       ) {
         // Gluten does not support write iceberg table.
         spark.sql(
@@ -228,7 +228,7 @@ class ClickHouseIcebergSuite extends 
GlutenClickHouseWholeStageTransformerSuite
     val rightTable = "p_int_tb"
     withTable(leftTable, rightTable) {
       // Partition key of string type.
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         // Gluten does not support write iceberg table.
         spark.sql(
           s"""
@@ -250,7 +250,7 @@ class ClickHouseIcebergSuite extends 
GlutenClickHouseWholeStageTransformerSuite
 
       // Partition key of integer type.
       withSQLConf(
-        GlutenConfig.GLUTEN_ENABLED_KEY -> "false"
+        GlutenConfig.GLUTEN_ENABLED.key -> "false"
       ) {
         // Gluten does not support write iceberg table.
         spark.sql(
@@ -350,7 +350,7 @@ class ClickHouseIcebergSuite extends 
GlutenClickHouseWholeStageTransformerSuite
 
   test("iceberg read mor table - delete and update") {
     withTable("iceberg_mor_tb") {
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         spark.sql(
           """
             |create table iceberg_mor_tb (
@@ -403,7 +403,7 @@ class ClickHouseIcebergSuite extends 
GlutenClickHouseWholeStageTransformerSuite
   // TODO: support merge-on-read mode
   ignore("iceberg read mor table - delete and update with merge-on-read mode") 
{
     withTable("iceberg_mor_tb") {
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         spark.sql(
           """
             |create table iceberg_mor_tb (
@@ -458,7 +458,7 @@ class ClickHouseIcebergSuite extends 
GlutenClickHouseWholeStageTransformerSuite
 
   test("iceberg read mor table - merge into") {
     withTable("iceberg_mor_tb", "merge_into_source_tb") {
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         spark.sql(
           """
             |create table iceberg_mor_tb (
@@ -531,7 +531,7 @@ class ClickHouseIcebergSuite extends 
GlutenClickHouseWholeStageTransformerSuite
   // TODO: support merge-on-read mode
   ignore("iceberg read mor table - merge into with merge-on-read mode") {
     withTable("iceberg_mor_tb", "merge_into_source_tb") {
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         spark.sql(
           """
             |create table iceberg_mor_tb (
diff --git 
a/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHListenerApi.scala
 
b/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHListenerApi.scala
index 48ef66ca74..1555f1aec4 100644
--- 
a/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHListenerApi.scala
+++ 
b/backends-clickhouse/src/main/scala/org/apache/gluten/backendsapi/clickhouse/CHListenerApi.scala
@@ -50,7 +50,7 @@ class CHListenerApi extends ListenerApi with Logging {
     initialize(pc.conf, isDriver = true)
 
     val expressionExtensionTransformer = 
ExpressionUtil.extendedExpressionTransformer(
-      pc.conf.get(GlutenConfig.GLUTEN_EXTENDED_EXPRESSION_TRAN_CONF, "")
+      pc.conf.get(GlutenConfig.EXTENDED_EXPRESSION_TRAN_CONF.key, "")
     )
     if (expressionExtensionTransformer != null) {
       ExpressionExtensionTrait.expressionExtensionTransformer = 
expressionExtensionTransformer
@@ -75,7 +75,8 @@ class CHListenerApi extends ListenerApi with Logging {
     Convention.ensureSparkRowAndBatchTypesRegistered()
     CHBatch.ensureRegistered()
     SparkDirectoryUtil.init(conf)
-    val libPath = conf.get(GlutenConfig.GLUTEN_LIB_PATH, StringUtils.EMPTY)
+    val libPath =
+      conf.get(GlutenConfig.GLUTEN_LIB_PATH.key, 
GlutenConfig.GLUTEN_LIB_PATH.defaultValueString)
     if (StringUtils.isBlank(libPath)) {
       throw new IllegalArgumentException(
         "Please set spark.gluten.sql.columnar.libpath to enable clickhouse 
backend")
@@ -83,7 +84,7 @@ class CHListenerApi extends ListenerApi with Logging {
     if (isDriver) {
       JniLibLoader.loadFromPath(libPath, true)
     } else {
-      val executorLibPath = conf.get(GlutenConfig.GLUTEN_EXECUTOR_LIB_PATH, 
libPath)
+      val executorLibPath = 
conf.get(GlutenConfig.GLUTEN_EXECUTOR_LIB_PATH.key, libPath)
       JniLibLoader.loadFromPath(executorLibPath, true)
     }
     // Add configs
diff --git 
a/backends-clickhouse/src/main/scala/org/apache/spark/rpc/GlutenDriverEndpoint.scala
 
b/backends-clickhouse/src/main/scala/org/apache/spark/rpc/GlutenDriverEndpoint.scala
index 58eb542e41..be0701ea59 100644
--- 
a/backends-clickhouse/src/main/scala/org/apache/spark/rpc/GlutenDriverEndpoint.scala
+++ 
b/backends-clickhouse/src/main/scala/org/apache/spark/rpc/GlutenDriverEndpoint.scala
@@ -96,8 +96,8 @@ class GlutenDriverEndpoint extends IsolatedRpcEndpoint with 
Logging {
 
 object GlutenDriverEndpoint extends Logging with RemovalListener[String, 
util.Set[String]] {
   private lazy val executionResourceExpiredTime = SparkEnv.get.conf.getLong(
-    GlutenConfig.GLUTEN_RESOURCE_RELATION_EXPIRED_TIME,
-    GlutenConfig.GLUTEN_RESOURCE_RELATION_EXPIRED_TIME_DEFAULT
+    GlutenConfig.GLUTEN_RESOURCE_RELATION_EXPIRED_TIME.key,
+    GlutenConfig.GLUTEN_RESOURCE_RELATION_EXPIRED_TIME.defaultValue.get
   )
 
   var glutenDriverEndpointRef: RpcEndpointRef = _
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/RunTPCHTest.scala 
b/backends-clickhouse/src/test/scala/org/apache/gluten/RunTPCHTest.scala
index 7edd69e2b4..1face5147f 100644
--- a/backends-clickhouse/src/test/scala/org/apache/gluten/RunTPCHTest.scala
+++ b/backends-clickhouse/src/test/scala/org/apache/gluten/RunTPCHTest.scala
@@ -94,7 +94,7 @@ object RunTPCHTest {
       .config("spark.databricks.delta.properties.defaults.checkpointInterval", 
5)
       .config("spark.databricks.delta.stalenessLimit", 3600 * 1000)
       .config(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
-      .config(GlutenConfig.GLUTEN_LIB_PATH, libPath)
+      .config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
       .config("spark.gluten.sql.columnar.iterator", "true")
       .config("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
       .config("spark.gluten.sql.enable.native.validation", "false")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/affinity/CHUTAffinity.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/affinity/CHUTAffinity.scala
index caa6a8d55e..2c728144ee 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/affinity/CHUTAffinity.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/affinity/CHUTAffinity.scala
@@ -31,10 +31,10 @@ object CHUTSoftAffinityManager extends AffinityManager {
   override lazy val usingSoftAffinity: Boolean = true
 
   override lazy val minOnTargetHosts: Int =
-    GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS_DEFAULT_VALUE
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS.defaultValue.get
 
   override lazy val detectDuplicateReading: Boolean = true
 
   override lazy val duplicateReadingMaxCacheItems: Int =
-    
GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS_DEFAULT_VALUE
+    
GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS.defaultValue.get
 }
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseJoinSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseJoinSuite.scala
index b1ed60d210..28aad55379 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseJoinSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseJoinSuite.scala
@@ -43,7 +43,7 @@ class GlutenClickHouseJoinSuite extends 
GlutenClickHouseWholeStageTransformerSui
       .set("spark.sql.adaptive.enabled", "false")
       .set("spark.sql.files.minPartitionNum", "1")
       .set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
       .set("spark.gluten.sql.columnar.iterator", "true")
       .set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
       .set("spark.gluten.sql.enable.native.validation", "false")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeExceptionSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeExceptionSuite.scala
index 3db2684037..9b617b998c 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeExceptionSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeExceptionSuite.scala
@@ -25,7 +25,7 @@ class GlutenClickHouseNativeExceptionSuite extends 
GlutenClickHouseWholeStageTra
 
   override protected def sparkConf: SparkConf = {
     super.sparkConf
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
   }
 
   test("native exception caught by jvm") {
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeLibSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeLibSuite.scala
index 77af49ca99..98d8fb9ee0 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeLibSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeLibSuite.scala
@@ -42,7 +42,7 @@ class GlutenClickHouseNativeLibSuite extends PlanTest {
         .builder()
         .master("local[1]")
         .config(baseSparkConf)
-        .config(GlutenConfig.GLUTEN_LIB_PATH, "path/not/exist/libch.so")
+        .config(GlutenConfig.GLUTEN_LIB_PATH.key, "path/not/exist/libch.so")
         .getOrCreate()
       spark.sql("select 1").show()
     } catch {
@@ -65,8 +65,8 @@ class GlutenClickHouseNativeLibSuite extends PlanTest {
         .builder()
         .master("local[1]")
         .config(baseSparkConf)
-        .config(GlutenConfig.GLUTEN_LIB_PATH, 
UTSystemParameters.clickHouseLibPath)
-        .config(GlutenConfig.GLUTEN_EXECUTOR_LIB_PATH, 
"/path/not/exist/libch.so")
+        .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
+        .config(GlutenConfig.GLUTEN_EXECUTOR_LIB_PATH.key, 
"/path/not/exist/libch.so")
         .getOrCreate()
       spark.sql("select 1").show()
     } finally {
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseSyntheticDataSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseSyntheticDataSuite.scala
index ed72136c51..6fffe30f05 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseSyntheticDataSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseSyntheticDataSuite.scala
@@ -52,7 +52,7 @@ class GlutenClickHouseSyntheticDataSuite
       .set("spark.databricks.delta.properties.defaults.checkpointInterval", 
"5")
       .set("spark.databricks.delta.stalenessLimit", "3600000")
       .set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
       .set("spark.gluten.sql.columnar.iterator", "true")
       .set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
       .set("spark.gluten.sql.enable.native.validation", "false")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCDSAbstractSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCDSAbstractSuite.scala
index 7e377e6798..f1f93692ec 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCDSAbstractSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCDSAbstractSuite.scala
@@ -132,7 +132,7 @@ abstract class GlutenClickHouseTPCDSAbstractSuite
       .set("spark.databricks.delta.properties.defaults.checkpointInterval", 
"5")
       .set("spark.databricks.delta.stalenessLimit", "3600000")
       .set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
       .set("spark.gluten.sql.columnar.iterator", "true")
       .set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
       .set("spark.gluten.sql.enable.native.validation", "false")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCHAbstractSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCHAbstractSuite.scala
index 63e801d20f..4c748fcb1d 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCHAbstractSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseTPCHAbstractSuite.scala
@@ -568,7 +568,7 @@ abstract class GlutenClickHouseTPCHAbstractSuite
       .set("spark.databricks.delta.stalenessLimit", "3600000")
       .set("spark.sql.extensions", "io.delta.sql.DeltaSparkSessionExtension")
       .set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
       .set("spark.gluten.sql.columnar.iterator", "true")
       .set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
       .set("spark.gluten.sql.enable.native.validation", "false")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseWholeStageTransformerSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseWholeStageTransformerSuite.scala
index cef8b9c776..db5fede0d5 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseWholeStageTransformerSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseWholeStageTransformerSuite.scala
@@ -79,7 +79,7 @@ class GlutenClickHouseWholeStageTransformerSuite extends 
WholeStageTransformerSu
     import org.apache.gluten.backendsapi.clickhouse.CHConf._
 
     val conf = super.sparkConf
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
       .set("spark.gluten.sql.enable.native.validation", "false")
       .set("spark.sql.warehouse.dir", warehouse)
       .setCHConfig("user_defined_path", "/tmp/user_defined")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenFunctionValidateSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenFunctionValidateSuite.scala
index 84c92d1e04..5923f1484a 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenFunctionValidateSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenFunctionValidateSuite.scala
@@ -57,7 +57,7 @@ class GlutenFunctionValidateSuite extends 
GlutenClickHouseWholeStageTransformerS
       .set("spark.databricks.delta.properties.defaults.checkpointInterval", 
"5")
       .set("spark.databricks.delta.stalenessLimit", "3600000")
       .set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
       .set("spark.gluten.sql.columnar.iterator", "true")
       .set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
       .set("spark.gluten.sql.enable.native.validation", "false")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/compatibility/GlutenClickhouseFunctionSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/compatibility/GlutenClickhouseFunctionSuite.scala
index 07fd0b6ef8..eac4c54cfc 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/compatibility/GlutenClickhouseFunctionSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/compatibility/GlutenClickhouseFunctionSuite.scala
@@ -51,7 +51,7 @@ class GlutenClickhouseFunctionSuite extends 
GlutenClickHouseTPCHAbstractSuite {
       .set("spark.databricks.delta.properties.defaults.checkpointInterval", 
"5")
       .set("spark.databricks.delta.stalenessLimit", "3600000")
       .set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
       .set("spark.gluten.sql.columnar.iterator", "true")
       .set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
       .set("spark.gluten.sql.enable.native.validation", "false")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseHiveTableSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseHiveTableSuite.scala
index 90b86a5470..917317bcc6 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseHiveTableSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseHiveTableSuite.scala
@@ -56,7 +56,7 @@ class GlutenClickHouseHiveTableSuite
       .set("spark.sql.adaptive.enabled", "false")
       .set("spark.sql.files.minPartitionNum", "1")
       .set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
       .set("spark.gluten.sql.columnar.iterator", "true")
       .set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
       .set("spark.gluten.sql.enable.native.validation", "false")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseNativeWriteTableSuite.scala
 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseNativeWriteTableSuite.scala
index da0d82ef81..b6067f138d 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseNativeWriteTableSuite.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/gluten/execution/hive/GlutenClickHouseNativeWriteTableSuite.scala
@@ -57,7 +57,7 @@ class GlutenClickHouseNativeWriteTableSuite
       .set("spark.databricks.delta.properties.defaults.checkpointInterval", 
"5")
       .set("spark.databricks.delta.stalenessLimit", "3600000")
       .set(ClickHouseConfig.CLICKHOUSE_WORKER_ID, "1")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
       .set("spark.gluten.sql.columnar.iterator", "true")
       .set("spark.gluten.sql.columnar.hashagg.enablefinal", "true")
       .set("spark.gluten.sql.enable.native.validation", "false")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/s3/S3AuthSuite.scala 
b/backends-clickhouse/src/test/scala/org/apache/gluten/s3/S3AuthSuite.scala
index b286551bba..78b96ce6a2 100644
--- a/backends-clickhouse/src/test/scala/org/apache/gluten/s3/S3AuthSuite.scala
+++ b/backends-clickhouse/src/test/scala/org/apache/gluten/s3/S3AuthSuite.scala
@@ -113,7 +113,7 @@ class S3AuthSuite extends AnyFunSuite {
         .appName("Gluten-S3-Test")
         .master(s"local[1]")
         .config("spark.plugins", "org.apache.gluten.GlutenPlugin")
-        .config(GlutenConfig.GLUTEN_LIB_PATH, libPath)
+        .config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
         .config("spark.memory.offHeap.enabled", "true")
         .config("spark.memory.offHeap.size", "1g")
         .config("spark.gluten.sql.enable.native.validation", "false")
@@ -144,7 +144,7 @@ class S3AuthSuite extends AnyFunSuite {
         .appName("Gluten-S3-Test")
         .master(s"local[1]")
         .config("spark.plugins", "org.apache.gluten.GlutenPlugin")
-        .config(GlutenConfig.GLUTEN_LIB_PATH, libPath)
+        .config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
         .config("spark.memory.offHeap.enabled", "true")
         .config("spark.memory.offHeap.size", "1g")
         .config("spark.gluten.sql.enable.native.validation", "false")
@@ -182,7 +182,7 @@ class S3AuthSuite extends AnyFunSuite {
         .appName("Gluten-S3-Test")
         .master(s"local[1]")
         .config("spark.plugins", "org.apache.gluten.GlutenPlugin")
-        .config(GlutenConfig.GLUTEN_LIB_PATH, libPath)
+        .config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
         .config("spark.memory.offHeap.enabled", "true")
         .config("spark.memory.offHeap.size", "1g")
         .config("spark.gluten.sql.enable.native.validation", "false")
@@ -231,7 +231,7 @@ class S3AuthSuite extends AnyFunSuite {
       .appName("Gluten-S3-Test")
       .master(s"local[1]")
       .config("spark.plugins", "org.apache.gluten.GlutenPlugin")
-      .config(GlutenConfig.GLUTEN_LIB_PATH, libPath)
+      .config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
       .config("spark.memory.offHeap.enabled", "true")
       .config("spark.memory.offHeap.size", "1g")
       .config("spark.gluten.sql.enable.native.validation", "false")
@@ -301,7 +301,7 @@ class S3AuthSuite extends AnyFunSuite {
       .appName("Gluten-S3-Test")
       .master(s"local[1]")
       .config("spark.plugins", "org.apache.gluten.GlutenPlugin")
-      .config(GlutenConfig.GLUTEN_LIB_PATH, libPath)
+      .config(GlutenConfig.GLUTEN_LIB_PATH.key, libPath)
       .config("spark.memory.offHeap.enabled", "true")
       .config("spark.memory.offHeap.size", "1g")
       .config("spark.gluten.sql.enable.native.validation", "false")
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHSqlBasedBenchmark.scala
 
b/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHSqlBasedBenchmark.scala
index 6041c50d41..7b067643f6 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHSqlBasedBenchmark.scala
+++ 
b/backends-clickhouse/src/test/scala/org/apache/spark/sql/execution/benchmarks/CHSqlBasedBenchmark.scala
@@ -32,7 +32,7 @@ trait CHSqlBasedBenchmark extends SqlBasedBenchmark {
   def getSparkConf: SparkConf = {
     val conf = new SparkConf()
       .setAppName(appName)
-      .setIfMissing(GlutenConfig.GLUTEN_LIB_PATH, 
UTSystemParameters.clickHouseLibPath)
+      .setIfMissing(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
       .setIfMissing("spark.master", s"local[$thrdNum]")
       .set("spark.plugins", "org.apache.gluten.GlutenPlugin")
       .set(
@@ -57,7 +57,8 @@ trait CHSqlBasedBenchmark extends SqlBasedBenchmark {
 
   override def afterAll(): Unit = {
     DeltaLog.clearCache()
-    val libPath = spark.conf.get(GlutenConfig.GLUTEN_LIB_PATH, 
UTSystemParameters.clickHouseLibPath)
+    val libPath =
+      spark.conf.get(GlutenConfig.GLUTEN_LIB_PATH.key, 
UTSystemParameters.clickHouseLibPath)
     JniLibLoader.unloadFromPath(libPath)
     // Wait for Ctrl+C, convenient for seeing Spark UI
     // Thread.sleep(600000)
diff --git 
a/backends-velox/src-celeborn/main/scala/org/apache/spark/shuffle/VeloxCelebornColumnarBatchSerializer.scala
 
b/backends-velox/src-celeborn/main/scala/org/apache/spark/shuffle/VeloxCelebornColumnarBatchSerializer.scala
index 98bae3f956..dc314ba44a 100644
--- 
a/backends-velox/src-celeborn/main/scala/org/apache/spark/shuffle/VeloxCelebornColumnarBatchSerializer.scala
+++ 
b/backends-velox/src-celeborn/main/scala/org/apache/spark/shuffle/VeloxCelebornColumnarBatchSerializer.scala
@@ -16,7 +16,7 @@
  */
 package org.apache.spark.shuffle
 
-import org.apache.gluten.config.GlutenConfig.{GLUTEN_RSS_SORT_SHUFFLE_WRITER, 
GLUTEN_SORT_SHUFFLE_WRITER}
+import org.apache.gluten.config.ReservedKeys.{GLUTEN_RSS_SORT_SHUFFLE_WRITER, 
GLUTEN_SORT_SHUFFLE_WRITER}
 import org.apache.gluten.backendsapi.BackendsApiManager
 import org.apache.gluten.memory.arrow.alloc.ArrowBufferAllocators
 import org.apache.gluten.runtime.Runtimes
diff --git 
a/backends-velox/src-celeborn/main/scala/org/apache/spark/shuffle/VeloxCelebornColumnarShuffleWriter.scala
 
b/backends-velox/src-celeborn/main/scala/org/apache/spark/shuffle/VeloxCelebornColumnarShuffleWriter.scala
index 7e7d0bb56a..115982f48c 100644
--- 
a/backends-velox/src-celeborn/main/scala/org/apache/spark/shuffle/VeloxCelebornColumnarShuffleWriter.scala
+++ 
b/backends-velox/src-celeborn/main/scala/org/apache/spark/shuffle/VeloxCelebornColumnarShuffleWriter.scala
@@ -33,6 +33,7 @@ import org.apache.spark.util.SparkResourceUtil
 import org.apache.celeborn.client.ShuffleClient
 import org.apache.celeborn.common.CelebornConf
 import org.apache.gluten.config.GlutenConfig
+import org.apache.gluten.config.ReservedKeys
 
 import java.io.IOException
 
@@ -50,7 +51,7 @@ class VeloxCelebornColumnarShuffleWriter[K, V](
     celebornConf,
     client,
     writeMetrics) {
-  private val isSort = 
!GlutenConfig.GLUTEN_HASH_SHUFFLE_WRITER.equals(shuffleWriterType)
+  private val isSort = 
!ReservedKeys.GLUTEN_HASH_SHUFFLE_WRITER.equals(shuffleWriterType)
 
   private val runtime =
     Runtimes.contextInstance(BackendsApiManager.getBackendName, 
"CelebornShuffleWriter")
diff --git 
a/backends-velox/src-uniffle/main/java/org/apache/spark/shuffle/writer/VeloxUniffleColumnarShuffleWriter.java
 
b/backends-velox/src-uniffle/main/java/org/apache/spark/shuffle/writer/VeloxUniffleColumnarShuffleWriter.java
index 20c9594216..3fd8261e96 100644
--- 
a/backends-velox/src-uniffle/main/java/org/apache/spark/shuffle/writer/VeloxUniffleColumnarShuffleWriter.java
+++ 
b/backends-velox/src-uniffle/main/java/org/apache/spark/shuffle/writer/VeloxUniffleColumnarShuffleWriter.java
@@ -17,6 +17,7 @@
 package org.apache.spark.shuffle.writer;
 
 import org.apache.gluten.config.GlutenConfig;
+import org.apache.gluten.config.ReservedKeys;
 import org.apache.gluten.backendsapi.BackendsApiManager;
 import org.apache.gluten.columnarbatch.ColumnarBatches;
 import org.apache.gluten.memory.memtarget.MemoryTarget;
@@ -170,8 +171,8 @@ public class VeloxUniffleColumnarShuffleWriter<K, V> 
extends RssShuffleWriter<K,
                       columnarDep.nativePartitioning(), partitionId),
                   "uniffle",
                   isSort
-                      ? GlutenConfig.GLUTEN_SORT_SHUFFLE_WRITER()
-                      : GlutenConfig.GLUTEN_HASH_SHUFFLE_WRITER(),
+                      ? ReservedKeys.GLUTEN_SORT_SHUFFLE_WRITER()
+                      : ReservedKeys.GLUTEN_HASH_SHUFFLE_WRITER(),
                   reallocThreshold);
           runtime
               .memoryManager()
diff --git 
a/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxListenerApi.scala
 
b/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxListenerApi.scala
index 0453558d1a..4a71378640 100644
--- 
a/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxListenerApi.scala
+++ 
b/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxListenerApi.scala
@@ -74,7 +74,7 @@ class VeloxListenerApi extends ListenerApi with Logging {
           s" the recommended size 
${ByteUnit.BYTE.toMiB(desiredOverheadSize)}MiB." +
           s" This may cause OOM.")
     }
-    conf.set(GlutenConfig.GLUTEN_OVERHEAD_SIZE_IN_BYTES_KEY, 
overheadSize.toString)
+    conf.set(GlutenConfig.COLUMNAR_OVERHEAD_SIZE_IN_BYTES.key, 
overheadSize.toString)
 
     // Sql table cache serializer.
     if (conf.getBoolean(GlutenConfig.COLUMNAR_TABLE_CACHE_ENABLED.key, 
defaultValue = false)) {
@@ -147,8 +147,8 @@ class VeloxListenerApi extends ListenerApi with Logging {
       )
 
     // Sets this configuration only once, since not undoable.
-    if (conf.getBoolean(GlutenConfig.GLUTEN_DEBUG_KEEP_JNI_WORKSPACE, 
defaultValue = false)) {
-      val debugDir = conf.get(GlutenConfig.GLUTEN_DEBUG_KEEP_JNI_WORKSPACE_DIR)
+    if (conf.getBoolean(GlutenConfig.DEBUG_KEEP_JNI_WORKSPACE.key, 
defaultValue = false)) {
+      val debugDir = conf.get(GlutenConfig.DEBUG_KEEP_JNI_WORKSPACE_DIR.key)
       JniWorkspace.enableDebug(debugDir)
     }
 
@@ -166,11 +166,11 @@ class VeloxListenerApi extends ListenerApi with Logging {
     SharedLibraryLoader.load(conf, loader)
 
     // Load backend libraries.
-    val libPath = conf.get(GlutenConfig.GLUTEN_LIB_PATH, StringUtils.EMPTY)
+    val libPath = conf.get(GlutenConfig.GLUTEN_LIB_PATH.key, StringUtils.EMPTY)
     if (StringUtils.isNotBlank(libPath)) { // Path based load. Ignore all 
other loadees.
       JniLibLoader.loadFromPath(libPath, false)
     } else {
-      val baseLibName = conf.get(GlutenConfig.GLUTEN_LIB_NAME, "gluten")
+      val baseLibName = conf.get(GlutenConfig.GLUTEN_LIB_NAME.key, "gluten")
       loader.load(s"$platformLibDir/${System.mapLibraryName(baseLibName)}", 
false)
       
loader.load(s"$platformLibDir/${System.mapLibraryName(VeloxBackend.BACKEND_NAME)}",
 false)
     }
diff --git 
a/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxSparkPlanExecApi.scala
 
b/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxSparkPlanExecApi.scala
index d0a7ebe418..559882e8b9 100644
--- 
a/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxSparkPlanExecApi.scala
+++ 
b/backends-velox/src/main/scala/org/apache/gluten/backendsapi/velox/VeloxSparkPlanExecApi.scala
@@ -18,6 +18,7 @@ package org.apache.gluten.backendsapi.velox
 
 import org.apache.gluten.backendsapi.SparkPlanExecApi
 import org.apache.gluten.config.GlutenConfig
+import org.apache.gluten.config.ReservedKeys
 import org.apache.gluten.exception.GlutenNotSupportException
 import org.apache.gluten.execution._
 import org.apache.gluten.expression._
@@ -545,7 +546,7 @@ class VeloxSparkPlanExecApi extends SparkPlanExecApi {
   override def useSortBasedShuffle(partitioning: Partitioning, output: 
Seq[Attribute]): Boolean = {
     val conf = GlutenConfig.get
     lazy val isCelebornSortBasedShuffle = conf.isUseCelebornShuffleManager &&
-      conf.celebornShuffleWriterType == GlutenConfig.GLUTEN_SORT_SHUFFLE_WRITER
+      conf.celebornShuffleWriterType == ReservedKeys.GLUTEN_SORT_SHUFFLE_WRITER
     partitioning != SinglePartition &&
     (partitioning.numPartitions >= 
GlutenConfig.get.columnarShuffleSortPartitionsThreshold ||
       output.size >= GlutenConfig.get.columnarShuffleSortColumnsThreshold) ||
diff --git 
a/backends-velox/src/main/scala/org/apache/gluten/utils/SharedLibraryLoader.scala
 
b/backends-velox/src/main/scala/org/apache/gluten/utils/SharedLibraryLoader.scala
index 5901a93cf1..3632202de9 100755
--- 
a/backends-velox/src/main/scala/org/apache/gluten/utils/SharedLibraryLoader.scala
+++ 
b/backends-velox/src/main/scala/org/apache/gluten/utils/SharedLibraryLoader.scala
@@ -31,8 +31,8 @@ trait SharedLibraryLoader {
 object SharedLibraryLoader {
   def load(conf: SparkConf, jni: JniLibLoader): Unit = {
     val shouldLoad = conf.getBoolean(
-      GlutenConfig.GLUTEN_LOAD_LIB_FROM_JAR,
-      GlutenConfig.GLUTEN_LOAD_LIB_FROM_JAR_DEFAULT)
+      GlutenConfig.GLUTEN_LOAD_LIB_FROM_JAR.key,
+      GlutenConfig.GLUTEN_LOAD_LIB_FROM_JAR.defaultValue.get)
     if (!shouldLoad) {
       return
     }
@@ -54,13 +54,13 @@ object SharedLibraryLoader {
   }
 
   private def find(conf: SparkConf): SharedLibraryLoader = {
-    val systemName = conf.getOption(GlutenConfig.GLUTEN_LOAD_LIB_OS)
+    val systemName = conf.getOption(GlutenConfig.GLUTEN_LOAD_LIB_OS.key)
     val loader = if (systemName.isDefined) {
-      val systemVersion = 
conf.getOption(GlutenConfig.GLUTEN_LOAD_LIB_OS_VERSION)
+      val systemVersion = 
conf.getOption(GlutenConfig.GLUTEN_LOAD_LIB_OS_VERSION.key)
       if (systemVersion.isEmpty) {
         throw new GlutenException(
-          s"${GlutenConfig.GLUTEN_LOAD_LIB_OS_VERSION} must be specified when 
specifies the " +
-            s"${GlutenConfig.GLUTEN_LOAD_LIB_OS}")
+          s"${GlutenConfig.GLUTEN_LOAD_LIB_OS_VERSION.key} must be specified 
when specifies the " +
+            s"${GlutenConfig.GLUTEN_LOAD_LIB_OS.key}")
       }
       getForOS(systemName.get, systemVersion.get, "")
     } else {
diff --git 
a/backends-velox/src/main/scala/org/apache/gluten/vectorized/ColumnarBatchSerializer.scala
 
b/backends-velox/src/main/scala/org/apache/gluten/vectorized/ColumnarBatchSerializer.scala
index 3af742f339..a51a4fca82 100644
--- 
a/backends-velox/src/main/scala/org/apache/gluten/vectorized/ColumnarBatchSerializer.scala
+++ 
b/backends-velox/src/main/scala/org/apache/gluten/vectorized/ColumnarBatchSerializer.scala
@@ -18,6 +18,7 @@ package org.apache.gluten.vectorized
 
 import org.apache.gluten.backendsapi.BackendsApiManager
 import org.apache.gluten.config.GlutenConfig
+import org.apache.gluten.config.ReservedKeys
 import org.apache.gluten.iterator.ClosableIterator
 import org.apache.gluten.memory.arrow.alloc.ArrowBufferAllocators
 import org.apache.gluten.runtime.Runtimes
@@ -55,7 +56,7 @@ class ColumnarBatchSerializer(
   with Serializable {
 
   private val shuffleWriterType =
-    if (isSort) GlutenConfig.GLUTEN_SORT_SHUFFLE_WRITER else 
GlutenConfig.GLUTEN_HASH_SHUFFLE_WRITER
+    if (isSort) ReservedKeys.GLUTEN_SORT_SHUFFLE_WRITER else 
ReservedKeys.GLUTEN_HASH_SHUFFLE_WRITER
 
   /** Creates a new [[SerializerInstance]]. */
   override def newInstance(): SerializerInstance = {
diff --git 
a/backends-velox/src/main/scala/org/apache/spark/shuffle/ColumnarShuffleWriter.scala
 
b/backends-velox/src/main/scala/org/apache/spark/shuffle/ColumnarShuffleWriter.scala
index 8581bc5c7c..4d49b93447 100644
--- 
a/backends-velox/src/main/scala/org/apache/spark/shuffle/ColumnarShuffleWriter.scala
+++ 
b/backends-velox/src/main/scala/org/apache/spark/shuffle/ColumnarShuffleWriter.scala
@@ -19,6 +19,7 @@ package org.apache.spark.shuffle
 import org.apache.gluten.backendsapi.BackendsApiManager
 import org.apache.gluten.columnarbatch.ColumnarBatches
 import org.apache.gluten.config.GlutenConfig
+import org.apache.gluten.config.ReservedKeys
 import org.apache.gluten.memory.memtarget.{MemoryTarget, Spiller, Spillers}
 import org.apache.gluten.runtime.Runtimes
 import org.apache.gluten.vectorized._
@@ -113,7 +114,7 @@ class ColumnarShuffleWriter[K, V](
   private val taskContext: TaskContext = TaskContext.get()
 
   private val shuffleWriterType: String =
-    if (isSort) GlutenConfig.GLUTEN_SORT_SHUFFLE_WRITER else 
GlutenConfig.GLUTEN_HASH_SHUFFLE_WRITER
+    if (isSort) ReservedKeys.GLUTEN_SORT_SHUFFLE_WRITER else 
ReservedKeys.GLUTEN_HASH_SHUFFLE_WRITER
 
   private def availableOffHeapPerTask(): Long = {
     val perTask =
diff --git 
a/backends-velox/src/test/scala/org/apache/gluten/execution/MiscOperatorSuite.scala
 
b/backends-velox/src/test/scala/org/apache/gluten/execution/MiscOperatorSuite.scala
index f0be15f07d..83d2bf5f4d 100644
--- 
a/backends-velox/src/test/scala/org/apache/gluten/execution/MiscOperatorSuite.scala
+++ 
b/backends-velox/src/test/scala/org/apache/gluten/execution/MiscOperatorSuite.scala
@@ -1970,11 +1970,11 @@ class MiscOperatorSuite extends 
VeloxWholeStageTransformerSuite with AdaptiveSpa
   }
 
   test("test 'spark.gluten.enabled'") {
-    withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "true") {
+    withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "true") {
       runQueryAndCompare("select * from lineitem limit 1") {
         checkGlutenOperatorMatch[FileSourceScanExecTransformer]
       }
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         runQueryAndCompare("select * from lineitem limit 1") {
           checkSparkOperatorMatch[FileSourceScanExec]
         }
diff --git 
a/gluten-celeborn/src-celeborn/main/scala/org/apache/spark/shuffle/CelebornColumnarShuffleWriter.scala
 
b/gluten-celeborn/src-celeborn/main/scala/org/apache/spark/shuffle/CelebornColumnarShuffleWriter.scala
index be052d4e74..7a514689e3 100644
--- 
a/gluten-celeborn/src-celeborn/main/scala/org/apache/spark/shuffle/CelebornColumnarShuffleWriter.scala
+++ 
b/gluten-celeborn/src-celeborn/main/scala/org/apache/spark/shuffle/CelebornColumnarShuffleWriter.scala
@@ -19,6 +19,7 @@ package org.apache.spark.shuffle
 import org.apache.celeborn.client.ShuffleClient
 import org.apache.celeborn.common.CelebornConf
 import org.apache.gluten.config.GlutenConfig
+import org.apache.gluten.config.ReservedKeys
 import org.apache.spark._
 import org.apache.spark.internal.Logging
 import org.apache.spark.internal.config.SHUFFLE_COMPRESS
@@ -71,7 +72,7 @@ abstract class CelebornColumnarShuffleWriter[K, V](
   protected val shuffleWriterType: String =
     celebornConf.shuffleWriterMode.name
       .toLowerCase(Locale.ROOT)
-      .replace(GlutenConfig.GLUTEN_SORT_SHUFFLE_WRITER, 
GlutenConfig.GLUTEN_RSS_SORT_SHUFFLE_WRITER)
+      .replace(ReservedKeys.GLUTEN_SORT_SHUFFLE_WRITER, 
ReservedKeys.GLUTEN_RSS_SORT_SHUFFLE_WRITER)
 
   protected val celebornPartitionPusher = new CelebornPartitionPusher(
     shuffleId,
diff --git 
a/gluten-core/src/main/java/org/apache/gluten/memory/memtarget/ThrowOnOomMemoryTarget.java
 
b/gluten-core/src/main/java/org/apache/gluten/memory/memtarget/ThrowOnOomMemoryTarget.java
index f517a2cc44..af5a3ff0ce 100644
--- 
a/gluten-core/src/main/java/org/apache/gluten/memory/memtarget/ThrowOnOomMemoryTarget.java
+++ 
b/gluten-core/src/main/java/org/apache/gluten/memory/memtarget/ThrowOnOomMemoryTarget.java
@@ -62,29 +62,31 @@ public class ThrowOnOomMemoryTarget implements MemoryTarget 
{
         .append(
             String.format(
                 "\t%s=%s",
-                GlutenConfig$.MODULE$.GLUTEN_OFFHEAP_SIZE_IN_BYTES_KEY(),
+                GlutenConfig$.MODULE$.COLUMNAR_OFFHEAP_SIZE_IN_BYTES().key(),
                 reformatBytes(
                     SQLConf.get()
-                        
.getConfString(GlutenConfig$.MODULE$.GLUTEN_OFFHEAP_SIZE_IN_BYTES_KEY()))))
+                        .getConfString(
+                            
GlutenConfig$.MODULE$.COLUMNAR_OFFHEAP_SIZE_IN_BYTES().key()))))
         .append(System.lineSeparator())
         .append(
             String.format(
                 "\t%s=%s",
-                GlutenConfig$.MODULE$.GLUTEN_TASK_OFFHEAP_SIZE_IN_BYTES_KEY(),
+                
GlutenConfig$.MODULE$.COLUMNAR_TASK_OFFHEAP_SIZE_IN_BYTES().key(),
                 reformatBytes(
                     SQLConf.get()
                         .getConfString(
-                            
GlutenConfig$.MODULE$.GLUTEN_TASK_OFFHEAP_SIZE_IN_BYTES_KEY()))))
+                            
GlutenConfig$.MODULE$.COLUMNAR_TASK_OFFHEAP_SIZE_IN_BYTES().key()))))
         .append(System.lineSeparator())
         .append(
             String.format(
                 "\t%s=%s",
-                
GlutenConfig$.MODULE$.GLUTEN_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES_KEY(),
+                
GlutenConfig$.MODULE$.COLUMNAR_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES().key(),
                 reformatBytes(
                     SQLConf.get()
                         .getConfString(
                             GlutenConfig$.MODULE$
-                                
.GLUTEN_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES_KEY()))))
+                                
.COLUMNAR_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES()
+                                .key()))))
         .append(System.lineSeparator())
         .append(
             String.format(
@@ -95,9 +97,9 @@ public class ThrowOnOomMemoryTarget implements MemoryTarget {
         .append(
             String.format(
                 "\t%s=%s",
-                GlutenConfig$.MODULE$.GLUTEN_DYNAMIC_OFFHEAP_SIZING_ENABLED(),
+                GlutenConfig$.MODULE$.DYNAMIC_OFFHEAP_SIZING_ENABLED().key(),
                 SQLConf.get()
-                    
.getConfString(GlutenConfig$.MODULE$.GLUTEN_DYNAMIC_OFFHEAP_SIZING_ENABLED())))
+                    
.getConfString(GlutenConfig$.MODULE$.DYNAMIC_OFFHEAP_SIZING_ENABLED().key())))
         .append(System.lineSeparator());
     // Dump all consumer usages to exception body
     errorBuilder.append(SparkMemoryUtil.dumpMemoryTargetStats(target));
diff --git a/gluten-core/src/main/scala/org/apache/gluten/GlutenPlugin.scala 
b/gluten-core/src/main/scala/org/apache/gluten/GlutenPlugin.scala
index 2cd524c327..aaca819d39 100644
--- a/gluten-core/src/main/scala/org/apache/gluten/GlutenPlugin.scala
+++ b/gluten-core/src/main/scala/org/apache/gluten/GlutenPlugin.scala
@@ -60,7 +60,11 @@ private[gluten] class GlutenDriverPlugin extends 
DriverPlugin with Logging {
 
     // Register Gluten listeners
     GlutenSQLAppStatusListener.register(sc)
-    if (conf.getBoolean(GLUTEN_SOFT_AFFINITY_ENABLED, 
GLUTEN_SOFT_AFFINITY_ENABLED_DEFAULT_VALUE)) {
+    if (
+      conf.getBoolean(
+        GLUTEN_SOFT_AFFINITY_ENABLED.key,
+        GLUTEN_SOFT_AFFINITY_ENABLED.defaultValue.get)
+    ) {
       SoftAffinityListener.register(sc)
     }
 
@@ -75,7 +79,9 @@ private[gluten] class GlutenDriverPlugin extends DriverPlugin 
with Logging {
   }
 
   override def registerMetrics(appId: String, pluginContext: PluginContext): 
Unit = {
-    if (pluginContext.conf().getBoolean(GLUTEN_UI_ENABLED, true)) {
+    if (
+      pluginContext.conf().getBoolean(GLUTEN_UI_ENABLED.key, 
GLUTEN_UI_ENABLED.defaultValue.get)
+    ) {
       _sc.foreach {
         sc =>
           GlutenEventUtils.attachUI(sc)
@@ -140,8 +146,8 @@ private[gluten] class GlutenDriverPlugin extends 
DriverPlugin with Logging {
 
     // adaptive custom cost evaluator class
     val enableGlutenCostEvaluator = conf.getBoolean(
-      GlutenConfig.GLUTEN_COST_EVALUATOR_ENABLED,
-      GLUTEN_COST_EVALUATOR_ENABLED_DEFAULT_VALUE)
+      GlutenConfig.COST_EVALUATOR_ENABLED.key,
+      GlutenConfig.COST_EVALUATOR_ENABLED.defaultValue.get)
     if (enableGlutenCostEvaluator) {
       val costEvaluator = 
"org.apache.spark.sql.execution.adaptive.GlutenCostEvaluator"
       conf.set(SQLConf.ADAPTIVE_CUSTOM_COST_EVALUATOR_CLASS.key, costEvaluator)
@@ -150,7 +156,9 @@ private[gluten] class GlutenDriverPlugin extends 
DriverPlugin with Logging {
     // check memory off-heap enabled and size
     val minOffHeapSize = "1MB"
     if (
-      !conf.getBoolean(GlutenConfig.GLUTEN_DYNAMIC_OFFHEAP_SIZING_ENABLED, 
false) &&
+      !conf.getBoolean(
+        DYNAMIC_OFFHEAP_SIZING_ENABLED.key,
+        DYNAMIC_OFFHEAP_SIZING_ENABLED.defaultValue.get) &&
       (!conf.getBoolean(GlutenConfig.SPARK_OFFHEAP_ENABLED, false) ||
         conf.getSizeAsBytes(GlutenConfig.SPARK_OFFHEAP_SIZE_KEY, 0) < 
JavaUtils.byteStringAsBytes(
           minOffHeapSize))
@@ -162,11 +170,11 @@ private[gluten] class GlutenDriverPlugin extends 
DriverPlugin with Logging {
 
     // Session's local time zone must be set. If not explicitly set by user, 
its default
     // value (detected for the platform) is used, consistent with spark.
-    conf.set(GLUTEN_DEFAULT_SESSION_TIMEZONE_KEY, 
SQLConf.SESSION_LOCAL_TIMEZONE.defaultValueString)
+    conf.set(GLUTEN_DEFAULT_SESSION_TIMEZONE.key, 
SQLConf.SESSION_LOCAL_TIMEZONE.defaultValueString)
 
     // Task slots.
     val taskSlots = SparkResourceUtil.getTaskSlots(conf)
-    conf.set(GLUTEN_NUM_TASK_SLOTS_PER_EXECUTOR_KEY, taskSlots.toString)
+    conf.set(NUM_TASK_SLOTS_PER_EXECUTOR.key, taskSlots.toString)
 
     val onHeapSize: Long = conf.getSizeAsBytes(SPARK_ONHEAP_SIZE_KEY, 1024 * 
1024 * 1024)
 
@@ -174,51 +182,62 @@ private[gluten] class GlutenDriverPlugin extends 
DriverPlugin with Logging {
     // size. Otherwise, the off-heap size is set to the value specified by the 
user (if any).
     // Note that this means that we will IGNORE the off-heap size specified by 
the user if the
     // dynamic off-heap feature is enabled.
-    val offHeapSize: Long = if 
(conf.getBoolean(GLUTEN_DYNAMIC_OFFHEAP_SIZING_ENABLED, false)) {
-      // Since when dynamic off-heap sizing is enabled, we commingle on-heap
-      // and off-heap memory, we set the off-heap size to the usable on-heap 
size. We will
-      // size it with a memory fraction, which can be aggressively set, but 
the default
-      // is using the same way that Spark sizes on-heap memory:
-      //
-      // spark.gluten.memory.dynamic.offHeap.sizing.memory.fraction *
-      //    (spark.executor.memory - 300MB).
-      //
-      // We will be careful to use the same configuration settings as Spark to 
ensure
-      // that we are sizing the off-heap memory in the same way as Spark sizes 
on-heap memory.
-      // The 300MB value, unfortunately, is hard-coded in Spark code.
-      ((onHeapSize - (300 * 1024 * 1024)) *
-        conf.getDouble(GLUTEN_DYNAMIC_OFFHEAP_SIZING_MEMORY_FRACTION, 
0.6d)).toLong
-    } else {
-      // Optimistic off-heap sizes, assuming all storage memory can be 
borrowed into execution
-      // memory pool, regardless of Spark option spark.memory.storageFraction.
-      conf.getSizeAsBytes(SPARK_OFFHEAP_SIZE_KEY, 0L)
-    }
+    val offHeapSize: Long =
+      if (
+        conf.getBoolean(
+          DYNAMIC_OFFHEAP_SIZING_ENABLED.key,
+          DYNAMIC_OFFHEAP_SIZING_ENABLED.defaultValue.get)
+      ) {
+        // Since when dynamic off-heap sizing is enabled, we commingle on-heap
+        // and off-heap memory, we set the off-heap size to the usable on-heap 
size. We will
+        // size it with a memory fraction, which can be aggressively set, but 
the default
+        // is using the same way that Spark sizes on-heap memory:
+        //
+        // spark.gluten.memory.dynamic.offHeap.sizing.memory.fraction *
+        //    (spark.executor.memory - 300MB).
+        //
+        // We will be careful to use the same configuration settings as Spark 
to ensure
+        // that we are sizing the off-heap memory in the same way as Spark 
sizes on-heap memory.
+        // The 300MB value, unfortunately, is hard-coded in Spark code.
+        ((onHeapSize - (300 * 1024 * 1024)) *
+          conf.getDouble(DYNAMIC_OFFHEAP_SIZING_MEMORY_FRACTION.key, 
0.6d)).toLong
+      } else {
+        // Optimistic off-heap sizes, assuming all storage memory can be 
borrowed into execution
+        // memory pool, regardless of Spark option 
spark.memory.storageFraction.
+        conf.getSizeAsBytes(SPARK_OFFHEAP_SIZE_KEY, 0L)
+      }
 
-    conf.set(GLUTEN_OFFHEAP_SIZE_IN_BYTES_KEY, offHeapSize.toString)
+    conf.set(COLUMNAR_OFFHEAP_SIZE_IN_BYTES.key, offHeapSize.toString)
     conf.set(SPARK_OFFHEAP_SIZE_KEY, offHeapSize.toString)
 
     val offHeapPerTask = offHeapSize / taskSlots
-    conf.set(GLUTEN_TASK_OFFHEAP_SIZE_IN_BYTES_KEY, offHeapPerTask.toString)
+    conf.set(COLUMNAR_TASK_OFFHEAP_SIZE_IN_BYTES.key, offHeapPerTask.toString)
 
     // If we are using dynamic off-heap sizing, we should also enable off-heap 
memory
     // officially.
-    if (conf.getBoolean(GLUTEN_DYNAMIC_OFFHEAP_SIZING_ENABLED, false)) {
+    if (
+      conf.getBoolean(
+        DYNAMIC_OFFHEAP_SIZING_ENABLED.key,
+        DYNAMIC_OFFHEAP_SIZING_ENABLED.defaultValue.get)
+    ) {
       conf.set(SPARK_OFFHEAP_ENABLED, "true")
 
       // We already sized the off-heap per task in a conservative manner, so 
we can just
       // use it.
-      conf.set(GLUTEN_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES_KEY, 
offHeapPerTask.toString)
+      conf.set(COLUMNAR_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES.key, 
offHeapPerTask.toString)
     } else {
       // Let's make sure this is set to false explicitly if it is not on as it
       // is looked up when throwing OOF exceptions.
-      conf.set(GLUTEN_DYNAMIC_OFFHEAP_SIZING_ENABLED, "false")
+      conf.set(
+        DYNAMIC_OFFHEAP_SIZING_ENABLED.key,
+        DYNAMIC_OFFHEAP_SIZING_ENABLED.defaultValueString)
 
       // Pessimistic off-heap sizes, with the assumption that all 
non-borrowable storage memory
       // determined by spark.memory.storageFraction was used.
       val fraction = 1.0d - conf.getDouble("spark.memory.storageFraction", 
0.5d)
       val conservativeOffHeapPerTask = (offHeapSize * fraction).toLong / 
taskSlots
       conf.set(
-        GLUTEN_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES_KEY,
+        COLUMNAR_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES.key,
         conservativeOffHeapPerTask.toString)
     }
 
diff --git 
a/gluten-core/src/main/scala/org/apache/gluten/extension/GlutenSessionExtensions.scala
 
b/gluten-core/src/main/scala/org/apache/gluten/extension/GlutenSessionExtensions.scala
index 9fe0d31d46..4e7b2a034c 100644
--- 
a/gluten-core/src/main/scala/org/apache/gluten/extension/GlutenSessionExtensions.scala
+++ 
b/gluten-core/src/main/scala/org/apache/gluten/extension/GlutenSessionExtensions.scala
@@ -32,7 +32,7 @@ private[gluten] class GlutenSessionExtensions
     injector.control.disableOn {
       session =>
         val glutenEnabledGlobally = session.conf
-          .get(GlutenConfig.GLUTEN_ENABLED_KEY, 
GlutenConfig.GLUTEN_ENABLED_BY_DEFAULT.toString)
+          .get(GlutenConfig.GLUTEN_ENABLED.key, 
GlutenConfig.GLUTEN_ENABLED.defaultValueString)
           .toBoolean
         val disabled = !glutenEnabledGlobally
         logDebug(s"Gluten is disabled by variable: glutenEnabledGlobally: 
$glutenEnabledGlobally")
diff --git 
a/gluten-core/src/main/scala/org/apache/gluten/softaffinity/SoftAffinityManager.scala
 
b/gluten-core/src/main/scala/org/apache/gluten/softaffinity/SoftAffinityManager.scala
index f755af8d36..bba178a796 100644
--- 
a/gluten-core/src/main/scala/org/apache/gluten/softaffinity/SoftAffinityManager.scala
+++ 
b/gluten-core/src/main/scala/org/apache/gluten/softaffinity/SoftAffinityManager.scala
@@ -40,16 +40,17 @@ abstract class AffinityManager extends LogLevelUtil with 
Logging {
   private val resourceRWLock = new ReentrantReadWriteLock(true)
 
   lazy val softAffinityReplicationNum: Int =
-    GlutenConfig.GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM_DEFAULT_VALUE
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM.defaultValue.get
 
-  lazy val minOnTargetHosts: Int = 
GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS_DEFAULT_VALUE
+  lazy val minOnTargetHosts: Int =
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS.defaultValue.get
 
   lazy val usingSoftAffinity: Boolean = true
 
   lazy val detectDuplicateReading: Boolean = true
 
   lazy val duplicateReadingMaxCacheItems: Int =
-    
GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS_DEFAULT_VALUE
+    
GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS.defaultValue.get
 
   // (execId, host) list
   private val idForExecutors = new mutable.ListBuffer[(String, String)]()
@@ -305,27 +306,27 @@ abstract class AffinityManager extends LogLevelUtil with 
Logging {
 
 object SoftAffinityManager extends AffinityManager {
   override lazy val usingSoftAffinity: Boolean = SparkEnv.get.conf.getBoolean(
-    GlutenConfig.GLUTEN_SOFT_AFFINITY_ENABLED,
-    GlutenConfig.GLUTEN_SOFT_AFFINITY_ENABLED_DEFAULT_VALUE
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_ENABLED.key,
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_ENABLED.defaultValue.get
   )
 
   override lazy val softAffinityReplicationNum: Int = SparkEnv.get.conf.getInt(
-    GlutenConfig.GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM,
-    GlutenConfig.GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM_DEFAULT_VALUE)
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM.key,
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM.defaultValue.get)
 
   override lazy val minOnTargetHosts: Int = SparkEnv.get.conf.getInt(
-    GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS,
-    GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS_DEFAULT_VALUE
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS.key,
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS.defaultValue.get
   )
 
   override lazy val detectDuplicateReading: Boolean = 
SparkEnv.get.conf.getBoolean(
-    GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_DETECT_ENABLED,
-    
GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_DETECT_ENABLED_DEFAULT_VALUE
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_DETECT_ENABLED.key,
+    
GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_DETECT_ENABLED.defaultValue.get
   ) &&
     SparkShimLoader.getSparkShims.supportDuplicateReadingTracking
 
   override lazy val duplicateReadingMaxCacheItems: Int = 
SparkEnv.get.conf.getInt(
-    GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS,
-    
GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS_DEFAULT_VALUE
+    GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS.key,
+    
GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS.defaultValue.get
   )
 }
diff --git 
a/gluten-iceberg/src-iceberg/test/scala/org/apache/gluten/execution/IcebergSuite.scala
 
b/gluten-iceberg/src-iceberg/test/scala/org/apache/gluten/execution/IcebergSuite.scala
index 6d7e57aa8a..459b332c7e 100644
--- 
a/gluten-iceberg/src-iceberg/test/scala/org/apache/gluten/execution/IcebergSuite.scala
+++ 
b/gluten-iceberg/src-iceberg/test/scala/org/apache/gluten/execution/IcebergSuite.scala
@@ -63,7 +63,7 @@ abstract class IcebergSuite extends 
WholeStageTransformerSuite {
     val rightTable = "p_int_tb"
     withTable(leftTable, rightTable) {
       // Partition key of string type.
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         // Gluten does not support write iceberg table.
         spark.sql(s"""
                      |create table $leftTable(id int, name string, p string)
@@ -84,7 +84,7 @@ abstract class IcebergSuite extends 
WholeStageTransformerSuite {
 
       // Partition key of integer type.
       withSQLConf(
-        GlutenConfig.GLUTEN_ENABLED_KEY -> "false"
+        GlutenConfig.GLUTEN_ENABLED.key -> "false"
       ) {
         // Gluten does not support write iceberg table.
         spark.sql(s"""
@@ -143,7 +143,7 @@ abstract class IcebergSuite extends 
WholeStageTransformerSuite {
     val rightTable = "p_int_tb"
     withTable(leftTable, rightTable) {
       // Partition key of string type.
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         // Gluten does not support write iceberg table.
         spark.sql(s"""
                      |create table $leftTable(id int, name string, p int)
@@ -164,7 +164,7 @@ abstract class IcebergSuite extends 
WholeStageTransformerSuite {
 
       // Partition key of integer type.
       withSQLConf(
-        GlutenConfig.GLUTEN_ENABLED_KEY -> "false"
+        GlutenConfig.GLUTEN_ENABLED.key -> "false"
       ) {
         // Gluten does not support write iceberg table.
         spark.sql(s"""
@@ -223,7 +223,7 @@ abstract class IcebergSuite extends 
WholeStageTransformerSuite {
     val rightTable = "p_int_tb"
     withTable(leftTable, rightTable) {
       // Partition key of string type.
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         // Gluten does not support write iceberg table.
         spark.sql(s"""
                      |create table $leftTable(id int, name string, p int)
@@ -244,7 +244,7 @@ abstract class IcebergSuite extends 
WholeStageTransformerSuite {
 
       // Partition key of integer type.
       withSQLConf(
-        GlutenConfig.GLUTEN_ENABLED_KEY -> "false"
+        GlutenConfig.GLUTEN_ENABLED.key -> "false"
       ) {
         // Gluten does not support write iceberg table.
         spark.sql(s"""
@@ -338,7 +338,7 @@ abstract class IcebergSuite extends 
WholeStageTransformerSuite {
 
   test("iceberg read mor table - delete and update") {
     withTable("iceberg_mor_tb") {
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         spark.sql("""
                     |create table iceberg_mor_tb (
                     |  id int,
@@ -390,7 +390,7 @@ abstract class IcebergSuite extends 
WholeStageTransformerSuite {
 
   test("iceberg read mor table - merge into") {
     withTable("iceberg_mor_tb", "merge_into_source_tb") {
-      withSQLConf(GlutenConfig.GLUTEN_ENABLED_KEY -> "false") {
+      withSQLConf(GlutenConfig.GLUTEN_ENABLED.key -> "false") {
         spark.sql("""
                     |create table iceberg_mor_tb (
                     |  id int,
diff --git 
a/gluten-substrait/src/main/scala/org/apache/gluten/expression/UDFMappings.scala
 
b/gluten-substrait/src/main/scala/org/apache/gluten/expression/UDFMappings.scala
index 948f6f4d16..568688c0bd 100644
--- 
a/gluten-substrait/src/main/scala/org/apache/gluten/expression/UDFMappings.scala
+++ 
b/gluten-substrait/src/main/scala/org/apache/gluten/expression/UDFMappings.scala
@@ -58,19 +58,19 @@ object UDFMappings extends Logging {
   }
 
   def loadFromSparkConf(conf: SparkConf): Unit = {
-    val strHiveUDFs = conf.get(GlutenConfig.GLUTEN_SUPPORTED_HIVE_UDFS, "")
+    val strHiveUDFs = conf.get(GlutenConfig.GLUTEN_SUPPORTED_HIVE_UDFS.key, "")
     if (!StringUtils.isBlank(strHiveUDFs)) {
       parseStringToMap(strHiveUDFs, hiveUDFMap)
       logDebug(s"loaded hive udf mappings:${hiveUDFMap.toString}")
     }
 
-    val strPythonUDFs = conf.get(GlutenConfig.GLUTEN_SUPPORTED_PYTHON_UDFS, "")
+    val strPythonUDFs = 
conf.get(GlutenConfig.GLUTEN_SUPPORTED_PYTHON_UDFS.key, "")
     if (!StringUtils.isBlank(strPythonUDFs)) {
       parseStringToMap(strPythonUDFs, pythonUDFMap)
       logDebug(s"loaded python udf mappings:${pythonUDFMap.toString}")
     }
 
-    val strScalaUDFs = conf.get(GlutenConfig.GLUTEN_SUPPORTED_SCALA_UDFS, "")
+    val strScalaUDFs = conf.get(GlutenConfig.GLUTEN_SUPPORTED_SCALA_UDFS.key, 
"")
     if (!StringUtils.isBlank(strScalaUDFs)) {
       parseStringToMap(strScalaUDFs, scalaUDFMap)
       logDebug(s"loaded scala udf mappings:${scalaUDFMap.toString}")
diff --git 
a/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/ColumnarShuffleExchangeExec.scala
 
b/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/ColumnarShuffleExchangeExec.scala
index 66ca97277e..6f49e47957 100644
--- 
a/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/ColumnarShuffleExchangeExec.scala
+++ 
b/gluten-substrait/src/main/scala/org/apache/spark/sql/execution/ColumnarShuffleExchangeExec.scala
@@ -17,7 +17,7 @@
 package org.apache.spark.sql.execution
 
 import org.apache.gluten.backendsapi.BackendsApiManager
-import org.apache.gluten.config.GlutenConfig
+import org.apache.gluten.config.ReservedKeys
 import org.apache.gluten.execution.ValidatablePlan
 import org.apache.gluten.extension.ValidationResult
 import org.apache.gluten.extension.columnar.transition.Convention
@@ -127,8 +127,8 @@ case class ColumnarShuffleExchangeExec(
 
   override def stringArgs: Iterator[Any] = {
     val shuffleWriterType = {
-      if (useSortBasedShuffle) GlutenConfig.GLUTEN_SORT_SHUFFLE_WRITER
-      else GlutenConfig.GLUTEN_HASH_SHUFFLE_WRITER
+      if (useSortBasedShuffle) ReservedKeys.GLUTEN_SORT_SHUFFLE_WRITER
+      else ReservedKeys.GLUTEN_HASH_SHUFFLE_WRITER
     }
     super.stringArgs ++ Iterator(s"[shuffle_writer_type=$shuffleWriterType]")
   }
diff --git 
a/gluten-substrait/src/test/scala/org/apache/spark/softaffinity/SoftAffinitySuite.scala
 
b/gluten-substrait/src/test/scala/org/apache/spark/softaffinity/SoftAffinitySuite.scala
index caca7ca9e3..7119090fb4 100644
--- 
a/gluten-substrait/src/test/scala/org/apache/spark/softaffinity/SoftAffinitySuite.scala
+++ 
b/gluten-substrait/src/test/scala/org/apache/spark/softaffinity/SoftAffinitySuite.scala
@@ -36,9 +36,9 @@ import scala.collection.mutable.ListBuffer
 class SoftAffinitySuite extends QueryTest with SharedSparkSession with 
PredicateHelper {
 
   override protected def sparkConf: SparkConf = super.sparkConf
-    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_ENABLED, "true")
-    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM, "2")
-    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS, "2")
+    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_ENABLED.key, "true")
+    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM.key, "2")
+    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS.key, "2")
 
   val scalaVersion = scala.util.Properties.versionNumberString
 
diff --git 
a/gluten-substrait/src/test/scala/org/apache/spark/softaffinity/SoftAffinityWithRDDInfoSuite.scala
 
b/gluten-substrait/src/test/scala/org/apache/spark/softaffinity/SoftAffinityWithRDDInfoSuite.scala
index 2100518e74..12040e505d 100644
--- 
a/gluten-substrait/src/test/scala/org/apache/spark/softaffinity/SoftAffinityWithRDDInfoSuite.scala
+++ 
b/gluten-substrait/src/test/scala/org/apache/spark/softaffinity/SoftAffinityWithRDDInfoSuite.scala
@@ -45,10 +45,10 @@ object FakeSoftAffinityManager extends AffinityManager {
 class SoftAffinityWithRDDInfoSuite extends QueryTest with SharedSparkSession 
with PredicateHelper {
 
   override protected def sparkConf: SparkConf = super.sparkConf
-    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_ENABLED, "true")
-    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_DETECT_ENABLED, 
"true")
-    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM, "2")
-    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS, "2")
+    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_ENABLED.key, "true")
+    
.set(GlutenConfig.GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_DETECT_ENABLED.key, 
"true")
+    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM.key, "2")
+    .set(GlutenConfig.GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS.key, "2")
     .set(GlutenConfig.SOFT_AFFINITY_LOG_LEVEL.key, "INFO")
 
   test("Soft Affinity Scheduler with duplicate reading detection") {
diff --git 
a/gluten-ut/common/src/test/scala/org/apache/spark/sql/DummyFilterColmnarHelper.scala
 
b/gluten-ut/common/src/test/scala/org/apache/spark/sql/DummyFilterColmnarHelper.scala
index 6605a942cc..ab4c6de441 100644
--- 
a/gluten-ut/common/src/test/scala/org/apache/spark/sql/DummyFilterColmnarHelper.scala
+++ 
b/gluten-ut/common/src/test/scala/org/apache/spark/sql/DummyFilterColmnarHelper.scala
@@ -86,7 +86,7 @@ object DummyFilterColmnarHelper {
         .config("spark.memory.offHeap.size", "1024MB")
         .config("spark.plugins", "org.apache.gluten.GlutenPlugin")
         .config("spark.shuffle.manager", 
"org.apache.spark.shuffle.sort.ColumnarShuffleManager")
-        .config(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .config("spark.io.compression.codec", "LZ4")
         .config("spark.gluten.sql.enable.native.validation", "false")
     } else {
diff --git 
a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala
 
b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala
index a16b459f8e..128ce144f2 100644
--- 
a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala
+++ 
b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenSQLTestsBaseTrait.scala
@@ -105,7 +105,7 @@ object GlutenSQLTestsBaseTrait {
         .set("spark.io.compression.codec", "LZ4")
         .set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .set("spark.gluten.sql.enable.native.validation", "false")
-        .set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .set("spark.sql.files.openCostInBytes", "134217728")
         .set("spark.unsafe.exceptionOnMemoryLeak", "true")
     } else {
diff --git 
a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsTrait.scala 
b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsTrait.scala
index 9a975812f0..330bf3b7ac 100644
--- 
a/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsTrait.scala
+++ 
b/gluten-ut/common/src/test/scala/org/apache/spark/sql/GlutenTestsTrait.scala
@@ -113,7 +113,7 @@ trait GlutenTestsTrait extends GlutenTestsCommonTrait {
           .config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
           .config("spark.gluten.sql.enable.native.validation", "false")
           .config("spark.sql.files.openCostInBytes", "134217728")
-          .config(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+          .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
           .config("spark.unsafe.exceptionOnMemoryLeak", "true")
           .config(GlutenConfig.UT_STATISTIC.key, "true")
           .getOrCreate()
diff --git 
a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
 
b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
index 6d1744423f..739446111b 100644
--- 
a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
+++ 
b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
@@ -195,7 +195,7 @@ class GlutenSQLQueryTestSuite
         .set("spark.io.compression.codec", "LZ4")
         .set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .set("spark.gluten.sql.enable.native.validation", "false")
-        .set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .set("spark.sql.files.openCostInBytes", "134217728")
         .set("spark.unsafe.exceptionOnMemoryLeak", "true")
     } else {
diff --git 
a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
 
b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
index 88e4d33f12..c55f4bf7f0 100644
--- 
a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
+++ 
b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
@@ -82,7 +82,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
         .set("spark.gluten.sql.enable.native.validation", "false")
         .set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .set("spark.gluten.sql.columnar.separate.scan.rdd.for.ch", "false")
-        .setIfMissing(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .setIfMissing(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .set(
           "spark.sql.catalog.spark_catalog",
           
"org.apache.spark.sql.execution.datasources.v2.clickhouse.ClickHouseSparkCatalog")
@@ -226,7 +226,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
   override def afterAll(): Unit = {
     if (BackendTestUtils.isCHBackendLoaded()) {
       val libPath =
-        spark.conf.get(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        spark.conf.get(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
       JniLibLoader.unloadFromPath(libPath)
     }
     super.afterAll()
diff --git 
a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
 
b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
index f516f71d69..838d30e7ca 100644
--- 
a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
+++ 
b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
@@ -89,7 +89,7 @@ class GlutenBroadcastJoinSuite extends BroadcastJoinSuite 
with GlutenTestsCommon
         .config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .config("spark.gluten.sql.enable.native.validation", "false")
         .config("spark.sql.files.openCostInBytes", "134217728")
-        .config(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .config("spark.unsafe.exceptionOnMemoryLeak", "true")
         .getOrCreate()
     } else {
diff --git 
a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
 
b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
index 56ec103349..0b14467aba 100644
--- 
a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
+++ 
b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
@@ -28,7 +28,7 @@ class GlutenHiveSQLQueryCHSuite extends 
GlutenHiveSQLQuerySuiteBase {
   override def sparkConf: SparkConf = {
     defaultSparkConf
       .set("spark.plugins", "org.apache.gluten.GlutenPlugin")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, SystemParameters.getClickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
       .set("spark.gluten.sql.enable.native.validation", "false")
       .set("spark.gluten.sql.native.writer.enabled", "true")
       .set("spark.sql.storeAssignmentPolicy", "legacy")
diff --git 
a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
 
b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
index ebfedfed62..be9063c839 100644
--- 
a/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
+++ 
b/gluten-ut/spark32/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
@@ -64,7 +64,7 @@ class SparkFunctionStatistics extends QueryTest {
           .config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
           .config("spark.gluten.sql.enable.native.validation", "false")
           .config("spark.sql.files.openCostInBytes", "134217728")
-          .config(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+          .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
           .config("spark.unsafe.exceptionOnMemoryLeak", "true")
           .getOrCreate()
       } else {
diff --git 
a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
 
b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
index 893acc6501..84d677ef59 100644
--- 
a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
+++ 
b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
@@ -195,7 +195,7 @@ class GlutenSQLQueryTestSuite
         .set("spark.io.compression.codec", "LZ4")
         .set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .set("spark.gluten.sql.enable.native.validation", "false")
-        .set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .set("spark.sql.files.openCostInBytes", "134217728")
         .set("spark.unsafe.exceptionOnMemoryLeak", "true")
     } else {
diff --git 
a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
 
b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
index 68e86c1ba1..aa5ee0a881 100644
--- 
a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
+++ 
b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
@@ -82,7 +82,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
         .set("spark.gluten.sql.enable.native.validation", "false")
         .set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .set("spark.gluten.sql.columnar.separate.scan.rdd.for.ch", "false")
-        .setIfMissing(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .setIfMissing(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .set(
           "spark.sql.catalog.spark_catalog",
           
"org.apache.spark.sql.execution.datasources.v2.clickhouse.ClickHouseSparkCatalog")
@@ -226,7 +226,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
   override def afterAll(): Unit = {
     if (BackendTestUtils.isCHBackendLoaded()) {
       val libPath =
-        spark.conf.get(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        spark.conf.get(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
       JniLibLoader.unloadFromPath(libPath)
     }
     super.afterAll()
diff --git 
a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
 
b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
index f68000fd0b..f5fd730796 100644
--- 
a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
+++ 
b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
@@ -66,7 +66,7 @@ class GlutenBroadcastJoinSuite extends BroadcastJoinSuite 
with GlutenTestsCommon
         .config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .config("spark.gluten.sql.enable.native.validation", "false")
         .config("spark.sql.files.openCostInBytes", "134217728")
-        .config(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .config("spark.unsafe.exceptionOnMemoryLeak", "true")
         .getOrCreate()
     } else {
diff --git 
a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
 
b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
index 56ec103349..0b14467aba 100644
--- 
a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
+++ 
b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
@@ -28,7 +28,7 @@ class GlutenHiveSQLQueryCHSuite extends 
GlutenHiveSQLQuerySuiteBase {
   override def sparkConf: SparkConf = {
     defaultSparkConf
       .set("spark.plugins", "org.apache.gluten.GlutenPlugin")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, SystemParameters.getClickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
       .set("spark.gluten.sql.enable.native.validation", "false")
       .set("spark.gluten.sql.native.writer.enabled", "true")
       .set("spark.sql.storeAssignmentPolicy", "legacy")
diff --git 
a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
 
b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
index eabe7a4792..69969e948e 100644
--- 
a/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
+++ 
b/gluten-ut/spark33/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
@@ -64,7 +64,7 @@ class SparkFunctionStatistics extends QueryTest {
           .config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
           .config("spark.gluten.sql.enable.native.validation", "false")
           .config("spark.sql.files.openCostInBytes", "134217728")
-          .config(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+          .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
           .config("spark.unsafe.exceptionOnMemoryLeak", "true")
           .getOrCreate()
       } else {
diff --git 
a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
 
b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
index 740f9c1595..7c0e9801c2 100644
--- 
a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
+++ 
b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
@@ -199,7 +199,7 @@ class GlutenSQLQueryTestSuite
         .set("spark.io.compression.codec", "LZ4")
         .set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .set("spark.gluten.sql.enable.native.validation", "false")
-        .set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .set("spark.sql.files.openCostInBytes", "134217728")
         .set("spark.unsafe.exceptionOnMemoryLeak", "true")
     } else {
diff --git 
a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
 
b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
index f7c134a98e..d065495b35 100644
--- 
a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
+++ 
b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
@@ -83,7 +83,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
         .set("spark.gluten.sql.enable.native.validation", "false")
         .set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .set("spark.gluten.sql.columnar.separate.scan.rdd.for.ch", "false")
-        .setIfMissing(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .setIfMissing(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .set(
           "spark.sql.catalog.spark_catalog",
           
"org.apache.spark.sql.execution.datasources.v2.clickhouse.ClickHouseSparkCatalog")
@@ -228,7 +228,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
   override def afterAll(): Unit = {
     if (BackendTestUtils.isCHBackendLoaded()) {
       val libPath =
-        spark.conf.get(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        spark.conf.get(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
       JniLibLoader.unloadFromPath(libPath)
     }
     super.afterAll()
diff --git 
a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
 
b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
index 20374382de..e9d5f9bbdc 100644
--- 
a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
+++ 
b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
@@ -64,7 +64,7 @@ class GlutenBroadcastJoinSuite extends BroadcastJoinSuite 
with GlutenTestsCommon
         .config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .config("spark.gluten.sql.enable.native.validation", "false")
         .config("spark.sql.files.openCostInBytes", "134217728")
-        .config(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .config("spark.unsafe.exceptionOnMemoryLeak", "true")
         .getOrCreate()
     } else {
diff --git 
a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
 
b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
index 56ec103349..0b14467aba 100644
--- 
a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
+++ 
b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
@@ -28,7 +28,7 @@ class GlutenHiveSQLQueryCHSuite extends 
GlutenHiveSQLQuerySuiteBase {
   override def sparkConf: SparkConf = {
     defaultSparkConf
       .set("spark.plugins", "org.apache.gluten.GlutenPlugin")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, SystemParameters.getClickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
       .set("spark.gluten.sql.enable.native.validation", "false")
       .set("spark.gluten.sql.native.writer.enabled", "true")
       .set("spark.sql.storeAssignmentPolicy", "legacy")
diff --git 
a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
 
b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
index 6a7fe611ee..f483d714f1 100644
--- 
a/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
+++ 
b/gluten-ut/spark34/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
@@ -63,7 +63,7 @@ class SparkFunctionStatistics extends QueryTest {
           .config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
           .config("spark.gluten.sql.enable.native.validation", "false")
           .config("spark.sql.files.openCostInBytes", "134217728")
-          .config(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+          .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
           .config("spark.unsafe.exceptionOnMemoryLeak", "true")
           .getOrCreate()
       } else {
diff --git 
a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
 
b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
index fd731169ea..3eebda83a0 100644
--- 
a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
+++ 
b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/GlutenSQLQueryTestSuite.scala
@@ -197,7 +197,7 @@ class GlutenSQLQueryTestSuite
         .set("spark.io.compression.codec", "LZ4")
         .set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .set("spark.gluten.sql.enable.native.validation", "false")
-        .set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .set("spark.sql.files.openCostInBytes", "134217728")
         .set("spark.unsafe.exceptionOnMemoryLeak", "true")
     } else {
diff --git 
a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
 
b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
index 68e86c1ba1..aa5ee0a881 100644
--- 
a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
+++ 
b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/benchmarks/ParquetReadBenchmark.scala
@@ -82,7 +82,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
         .set("spark.gluten.sql.enable.native.validation", "false")
         .set("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .set("spark.gluten.sql.columnar.separate.scan.rdd.for.ch", "false")
-        .setIfMissing(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .setIfMissing(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .set(
           "spark.sql.catalog.spark_catalog",
           
"org.apache.spark.sql.execution.datasources.v2.clickhouse.ClickHouseSparkCatalog")
@@ -226,7 +226,7 @@ object ParquetReadBenchmark extends SqlBasedBenchmark {
   override def afterAll(): Unit = {
     if (BackendTestUtils.isCHBackendLoaded()) {
       val libPath =
-        spark.conf.get(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        spark.conf.get(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
       JniLibLoader.unloadFromPath(libPath)
     }
     super.afterAll()
diff --git 
a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
 
b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
index f68000fd0b..f5fd730796 100644
--- 
a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
+++ 
b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/execution/joins/GlutenBroadcastJoinSuite.scala
@@ -66,7 +66,7 @@ class GlutenBroadcastJoinSuite extends BroadcastJoinSuite 
with GlutenTestsCommon
         .config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
         .config("spark.gluten.sql.enable.native.validation", "false")
         .config("spark.sql.files.openCostInBytes", "134217728")
-        .config(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
         .config("spark.unsafe.exceptionOnMemoryLeak", "true")
         .getOrCreate()
     } else {
diff --git 
a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
 
b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
index 56ec103349..0b14467aba 100644
--- 
a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
+++ 
b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/hive/execution/GlutenHiveSQLQueryCHSuite.scala
@@ -28,7 +28,7 @@ class GlutenHiveSQLQueryCHSuite extends 
GlutenHiveSQLQuerySuiteBase {
   override def sparkConf: SparkConf = {
     defaultSparkConf
       .set("spark.plugins", "org.apache.gluten.GlutenPlugin")
-      .set(GlutenConfig.GLUTEN_LIB_PATH, SystemParameters.getClickHouseLibPath)
+      .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
       .set("spark.gluten.sql.enable.native.validation", "false")
       .set("spark.gluten.sql.native.writer.enabled", "true")
       .set("spark.sql.storeAssignmentPolicy", "legacy")
diff --git 
a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
 
b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
index 6a7fe611ee..f483d714f1 100644
--- 
a/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
+++ 
b/gluten-ut/spark35/src/test/scala/org/apache/spark/sql/statistics/SparkFunctionStatistics.scala
@@ -63,7 +63,7 @@ class SparkFunctionStatistics extends QueryTest {
           .config("spark.gluten.sql.columnar.backend.ch.worker.id", "1")
           .config("spark.gluten.sql.enable.native.validation", "false")
           .config("spark.sql.files.openCostInBytes", "134217728")
-          .config(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+          .config(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
           .config("spark.unsafe.exceptionOnMemoryLeak", "true")
           .getOrCreate()
       } else {
diff --git 
a/gluten-ut/test/src/test/scala/org/apache/gluten/execution/MergeTwoPhasesHashBaseAggregateSuite.scala
 
b/gluten-ut/test/src/test/scala/org/apache/gluten/execution/MergeTwoPhasesHashBaseAggregateSuite.scala
index 7a6c6cd8fc..32eb10a32f 100644
--- 
a/gluten-ut/test/src/test/scala/org/apache/gluten/execution/MergeTwoPhasesHashBaseAggregateSuite.scala
+++ 
b/gluten-ut/test/src/test/scala/org/apache/gluten/execution/MergeTwoPhasesHashBaseAggregateSuite.scala
@@ -57,7 +57,7 @@ abstract class BaseMergeTwoPhasesHashBaseAggregateSuite 
extends WholeStageTransf
     if (BackendTestUtils.isCHBackendLoaded()) {
       conf
         .set("spark.gluten.sql.enable.native.validation", "false")
-        .set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
     }
     conf
   }
diff --git 
a/gluten-ut/test/src/test/scala/org/apache/gluten/expressions/GlutenExpressionMappingSuite.scala
 
b/gluten-ut/test/src/test/scala/org/apache/gluten/expressions/GlutenExpressionMappingSuite.scala
index 4daef607d7..14a07aea5b 100644
--- 
a/gluten-ut/test/src/test/scala/org/apache/gluten/expressions/GlutenExpressionMappingSuite.scala
+++ 
b/gluten-ut/test/src/test/scala/org/apache/gluten/expressions/GlutenExpressionMappingSuite.scala
@@ -43,7 +43,7 @@ class GlutenExpressionMappingSuite
     if (BackendTestUtils.isCHBackendLoaded()) {
       conf
         .set("spark.gluten.sql.enable.native.validation", "false")
-        .set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
     }
     conf
   }
diff --git 
a/gluten-ut/test/src/test/scala/org/apache/gluten/extension/GlutenExtensionRewriteRuleSuite.scala
 
b/gluten-ut/test/src/test/scala/org/apache/gluten/extension/GlutenExtensionRewriteRuleSuite.scala
index c002060d06..0068e83a6d 100644
--- 
a/gluten-ut/test/src/test/scala/org/apache/gluten/extension/GlutenExtensionRewriteRuleSuite.scala
+++ 
b/gluten-ut/test/src/test/scala/org/apache/gluten/extension/GlutenExtensionRewriteRuleSuite.scala
@@ -33,7 +33,7 @@ class GlutenExtensionRewriteRuleSuite extends 
WholeStageTransformerSuite {
     if (BackendTestUtils.isCHBackendLoaded()) {
       conf
         .set("spark.gluten.sql.enable.native.validation", "false")
-        .set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
     }
     conf
   }
diff --git 
a/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala 
b/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala
index 8058aab259..74d0e44db7 100644
--- a/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala
+++ b/gluten-ut/test/src/test/scala/org/apache/gluten/sql/SQLQuerySuite.scala
@@ -41,7 +41,7 @@ class SQLQuerySuite extends WholeStageTransformerSuite {
     if (BackendTestUtils.isCHBackendLoaded()) {
       conf
         .set("spark.gluten.sql.enable.native.validation", "false")
-        .set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
     }
     conf
   }
diff --git 
a/gluten-ut/test/src/test/scala/org/apache/spark/sql/GlutenExpressionDataTypesValidation.scala
 
b/gluten-ut/test/src/test/scala/org/apache/spark/sql/GlutenExpressionDataTypesValidation.scala
index e9d6233e91..adf72a3f63 100644
--- 
a/gluten-ut/test/src/test/scala/org/apache/spark/sql/GlutenExpressionDataTypesValidation.scala
+++ 
b/gluten-ut/test/src/test/scala/org/apache/spark/sql/GlutenExpressionDataTypesValidation.scala
@@ -46,7 +46,7 @@ class GlutenExpressionDataTypesValidation extends 
WholeStageTransformerSuite {
     if (BackendTestUtils.isCHBackendLoaded()) {
       conf
         .set("spark.gluten.sql.enable.native.validation", "false")
-        .set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+        .set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
     }
     conf
   }
diff --git 
a/gluten-ut/test/src/test/scala/org/apache/spark/sql/datasources/GlutenNoopWriterRuleSuite.scala
 
b/gluten-ut/test/src/test/scala/org/apache/spark/sql/datasources/GlutenNoopWriterRuleSuite.scala
index ebf17444e6..b01b543006 100644
--- 
a/gluten-ut/test/src/test/scala/org/apache/spark/sql/datasources/GlutenNoopWriterRuleSuite.scala
+++ 
b/gluten-ut/test/src/test/scala/org/apache/spark/sql/datasources/GlutenNoopWriterRuleSuite.scala
@@ -37,7 +37,7 @@ class GlutenNoopWriterRuleSuite extends GlutenQueryTest with 
SharedSparkSession
       .set("spark.ui.enabled", "false")
       .set("spark.gluten.ui.enabled", "false")
     if (BackendTestUtils.isCHBackendLoaded()) {
-      conf.set(GlutenConfig.GLUTEN_LIB_PATH, 
SystemParameters.getClickHouseLibPath)
+      conf.set(GlutenConfig.GLUTEN_LIB_PATH.key, 
SystemParameters.getClickHouseLibPath)
     }
     conf
   }
diff --git 
a/shims/common/src/main/scala/org/apache/gluten/config/GlutenConfig.scala 
b/shims/common/src/main/scala/org/apache/gluten/config/GlutenConfig.scala
index e4d3a76326..94a95ae515 100644
--- a/shims/common/src/main/scala/org/apache/gluten/config/GlutenConfig.scala
+++ b/shims/common/src/main/scala/org/apache/gluten/config/GlutenConfig.scala
@@ -160,7 +160,9 @@ class GlutenConfig(conf: SQLConf) extends Logging {
 
   def celebornShuffleWriterType: String =
     conf
-      .getConfString("spark.celeborn.client.spark.shuffle.writer", 
GLUTEN_HASH_SHUFFLE_WRITER)
+      .getConfString(
+        "spark.celeborn.client.spark.shuffle.writer",
+        ReservedKeys.GLUTEN_HASH_SHUFFLE_WRITER)
       .toLowerCase(Locale.ROOT)
 
   def enableColumnarShuffle: Boolean = getConf(COLUMNAR_SHUFFLE_ENABLED)
@@ -177,8 +179,6 @@ class GlutenConfig(conf: SQLConf) extends Logging {
 
   def tmpFile: Option[String] = getConf(COLUMNAR_TEMP_DIR)
 
-  @deprecated def broadcastCacheTimeout: Int = 
getConf(COLUMNAR_BROADCAST_CACHE_TIMEOUT)
-
   def columnarShuffleSortPartitionsThreshold: Int =
     getConf(COLUMNAR_SHUFFLE_SORT_PARTITIONS_THRESHOLD)
 
@@ -507,20 +507,7 @@ object GlutenConfig {
     ConfigBuilder(key).onCreate(_ => SQLConf.registerStaticConfigKey(key))
   }
 
-  val GLUTEN_ENABLED_BY_DEFAULT = true
-  val GLUTEN_ENABLED_KEY = "spark.gluten.enabled"
-  val GLUTEN_LIB_NAME = "spark.gluten.sql.columnar.libname"
-  val GLUTEN_LIB_PATH = "spark.gluten.sql.columnar.libpath"
-  val GLUTEN_EXECUTOR_LIB_PATH = "spark.gluten.sql.columnar.executor.libpath"
-
   // Hive configurations.
-  val SPARK_PREFIX = "spark."
-  val HIVE_EXEC_ORC_STRIPE_SIZE = "hive.exec.orc.stripe.size"
-  val SPARK_HIVE_EXEC_ORC_STRIPE_SIZE: String = SPARK_PREFIX + 
HIVE_EXEC_ORC_STRIPE_SIZE
-  val HIVE_EXEC_ORC_ROW_INDEX_STRIDE = "hive.exec.orc.row.index.stride"
-  val SPARK_HIVE_EXEC_ORC_ROW_INDEX_STRIDE: String = SPARK_PREFIX + 
HIVE_EXEC_ORC_ROW_INDEX_STRIDE
-  val HIVE_EXEC_ORC_COMPRESS = "hive.exec.orc.compress"
-  val SPARK_HIVE_EXEC_ORC_COMPRESS: String = SPARK_PREFIX + 
HIVE_EXEC_ORC_COMPRESS
   val SPARK_SQL_PARQUET_COMPRESSION_CODEC: String = 
"spark.sql.parquet.compression.codec"
   val PARQUET_BLOCK_SIZE: String = "parquet.block.size"
   val PARQUET_BLOCK_ROWS: String = "parquet.block.rows"
@@ -551,9 +538,6 @@ object GlutenConfig {
   val S3_CONNECTION_MAXIMUM = "fs.s3a.connection.maximum"
   val SPARK_S3_CONNECTION_MAXIMUM: String = HADOOP_PREFIX + 
S3_CONNECTION_MAXIMUM
 
-  // Hardware acceleraters backend
-  val GLUTEN_SHUFFLE_CODEC_BACKEND = 
"spark.gluten.sql.columnar.shuffle.codecBackend"
-
   // ABFS config
   val ABFS_PREFIX = "fs.azure."
 
@@ -562,14 +546,10 @@ object GlutenConfig {
   val STORAGE_ROOT_URL = "storage.root.url"
   val AUTH_TYPE = "auth.type"
   val AUTH_SERVICE_ACCOUNT_JSON_KEYFILE = "auth.service.account.json.keyfile"
-  val HTTP_MAX_RETRY_COUNT = "http.max.retry"
-  val HTTP_MAX_RETRY_TIME = "http.max.retry-time"
   val SPARK_GCS_STORAGE_ROOT_URL: String = HADOOP_PREFIX + GCS_PREFIX + 
STORAGE_ROOT_URL
   val SPARK_GCS_AUTH_TYPE: String = HADOOP_PREFIX + GCS_PREFIX + AUTH_TYPE
   val SPARK_GCS_AUTH_SERVICE_ACCOUNT_JSON_KEYFILE: String =
     HADOOP_PREFIX + GCS_PREFIX + AUTH_SERVICE_ACCOUNT_JSON_KEYFILE
-  val SPARK_GCS_MAX_RETRY_COUNT: String = HADOOP_PREFIX + GCS_PREFIX + 
HTTP_MAX_RETRY_COUNT
-  val SPARK_GCS_MAX_RETRY_TIME: String = HADOOP_PREFIX + GCS_PREFIX + 
HTTP_MAX_RETRY_TIME
 
   // QAT config
   val GLUTEN_QAT_BACKEND_NAME = "qat"
@@ -595,113 +575,12 @@ object GlutenConfig {
   val SPARK_SHUFFLE_SPILL_COMPRESS = "spark.shuffle.spill.compress"
   val SPARK_SHUFFLE_SPILL_COMPRESS_DEFAULT: Boolean = true
 
-  // For Soft Affinity Scheduling
-  // Enable Soft Affinity Scheduling, default value is false
-  val GLUTEN_SOFT_AFFINITY_ENABLED = "spark.gluten.soft-affinity.enabled"
-  val GLUTEN_SOFT_AFFINITY_ENABLED_DEFAULT_VALUE = false
-  // Calculate the number of the replications for scheduling to the target 
executors per file
-  val GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM = 
"spark.gluten.soft-affinity.replications.num"
-  val GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM_DEFAULT_VALUE = 2
-  // For on HDFS, if there are already target hosts,
-  // and then prefer to use the original target hosts to schedule
-  val GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS = 
"spark.gluten.soft-affinity.min.target-hosts"
-  val GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS_DEFAULT_VALUE = 1
-
-  // Enable Soft Affinity duplicate reading detection, default value is false
-  val GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_DETECT_ENABLED =
-    "spark.gluten.soft-affinity.duplicateReadingDetect.enabled"
-  val GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_DETECT_ENABLED_DEFAULT_VALUE = 
false
-  // Enable Soft Affinity duplicate reading detection, default value is 10000
-  val GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS =
-    "spark.gluten.soft-affinity.duplicateReading.maxCacheItems"
-  val GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS_DEFAULT_VALUE = 
10000
-
-  // Pass through to native conf
-  val GLUTEN_SAVE_DIR = "spark.gluten.saveDir"
-
-  val GLUTEN_DEBUG_MODE = "spark.gluten.sql.debug"
-  val GLUTEN_DEBUG_KEEP_JNI_WORKSPACE = 
"spark.gluten.sql.debug.keepJniWorkspace"
-  val GLUTEN_DEBUG_KEEP_JNI_WORKSPACE_DIR = 
"spark.gluten.sql.debug.keepJniWorkspaceDir"
-
-  // Added back to Spark Conf during executor initialization
-  val GLUTEN_NUM_TASK_SLOTS_PER_EXECUTOR_KEY = 
"spark.gluten.numTaskSlotsPerExecutor"
-  val GLUTEN_OVERHEAD_SIZE_IN_BYTES_KEY = 
"spark.gluten.memoryOverhead.size.in.bytes"
-  val GLUTEN_OFFHEAP_SIZE_IN_BYTES_KEY = 
"spark.gluten.memory.offHeap.size.in.bytes"
-  val GLUTEN_TASK_OFFHEAP_SIZE_IN_BYTES_KEY = 
"spark.gluten.memory.task.offHeap.size.in.bytes"
-  val GLUTEN_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES_KEY =
-    "spark.gluten.memory.conservative.task.offHeap.size.in.bytes"
-
-  // Batch size.
-  val GLUTEN_MAX_BATCH_SIZE_KEY = "spark.gluten.sql.columnar.maxBatchSize"
-
-  // Shuffle writer type.
-  val GLUTEN_HASH_SHUFFLE_WRITER = "hash"
-  val GLUTEN_SORT_SHUFFLE_WRITER = "sort"
-  val GLUTEN_RSS_SORT_SHUFFLE_WRITER = "rss_sort"
-
-  // Shuffle Writer buffer size.
-  val GLUTEN_SHUFFLE_WRITER_BUFFER_SIZE = 
"spark.gluten.shuffleWriter.bufferSize"
-  val GLUTEN_SHUFFLE_WRITER_MERGE_THRESHOLD = 
"spark.gluten.sql.columnar.shuffle.merge.threshold"
-
-  // Shuffle reader buffer size.
-  val GLUTEN_SHUFFLE_READER_BUFFER_SIZE = 
"spark.gluten.sql.columnar.shuffle.readerBufferSize"
-
-  // Controls whether to load DLL from jars. User can get dependent native 
libs packed into a jar
-  // by executing dev/package.sh. Then, with that jar configured, Gluten can 
load the native libs
-  // at runtime. This config is just for velox backend. And it is NOT 
applicable to the situation
-  // where deployed gluten jar is generated through static build (e.g., 
Gluten's release jar).
-  val GLUTEN_LOAD_LIB_FROM_JAR = "spark.gluten.loadLibFromJar"
-  val GLUTEN_LOAD_LIB_FROM_JAR_DEFAULT = false
-  val GLUTEN_LOAD_LIB_OS = "spark.gluten.loadLibOS"
-  val GLUTEN_LOAD_LIB_OS_VERSION = "spark.gluten.loadLibOSVersion"
-
-  // Expired time of execution with resource relation has cached
-  val GLUTEN_RESOURCE_RELATION_EXPIRED_TIME = 
"spark.gluten.execution.resource.expired.time"
-  // unit: SECONDS, default 1 day
-  val GLUTEN_RESOURCE_RELATION_EXPIRED_TIME_DEFAULT: Int = 86400
-
-  // Supported hive/python/scala udf names
-  val GLUTEN_SUPPORTED_HIVE_UDFS = "spark.gluten.supported.hive.udfs"
-  val GLUTEN_SUPPORTED_PYTHON_UDFS = "spark.gluten.supported.python.udfs"
-  val GLUTEN_SUPPORTED_SCALA_UDFS = "spark.gluten.supported.scala.udfs"
-
-  // FIXME: This only works with CH backend.
-  val GLUTEN_EXTENDED_EXPRESSION_TRAN_CONF =
-    "spark.gluten.sql.columnar.extended.expressions.transformer"
-
-  // This is an internal config property set by Gluten. It is used to hold 
default session timezone
-  // and will be really used by Gluten only if `spark.sql.session.timeZone` is 
not set.
-  val GLUTEN_DEFAULT_SESSION_TIMEZONE_KEY = 
"spark.gluten.sql.session.timeZone.default"
-
-  // Principal of current user
-  val GLUTEN_UGI_USERNAME = "spark.gluten.ugi.username"
-  // Tokens of current user, split by `\0`
-  val GLUTEN_UGI_TOKENS = "spark.gluten.ugi.tokens"
-
-  val GLUTEN_UI_ENABLED = "spark.gluten.ui.enabled"
-
-  val GLUTEN_DYNAMIC_OFFHEAP_SIZING_ENABLED = 
"spark.gluten.memory.dynamic.offHeap.sizing.enabled"
-  val GLUTEN_DYNAMIC_OFFHEAP_SIZING_MEMORY_FRACTION =
-    "spark.gluten.memory.dynamic.offHeap.sizing.memory.fraction"
-
-  val GLUTEN_COST_EVALUATOR_ENABLED = 
"spark.gluten.sql.adaptive.costEvaluator.enabled"
-  val GLUTEN_COST_EVALUATOR_ENABLED_DEFAULT_VALUE = true
-
   var ins: GlutenConfig = _
 
   def get: GlutenConfig = {
     new GlutenConfig(SQLConf.get)
   }
 
-  @deprecated
-  def getTempFile: String = synchronized {
-    if (ins != null && ins.tmpFile.nonEmpty) {
-      ins.tmpFile.get
-    } else {
-      System.getProperty("java.io.tmpdir")
-    }
-  }
-
   def prefixOf(backendName: String): String = {
     GLUTEN_CONFIG_PREFIX + backendName
   }
@@ -712,13 +591,13 @@ object GlutenConfig {
       conf: scala.collection.Map[String, String]): util.Map[String, String] = {
     val nativeConfMap = new util.HashMap[String, String]()
     val keys = Set(
-      GLUTEN_DEBUG_MODE,
-      GLUTEN_SAVE_DIR,
-      GLUTEN_TASK_OFFHEAP_SIZE_IN_BYTES_KEY,
-      GLUTEN_MAX_BATCH_SIZE_KEY,
-      GLUTEN_SHUFFLE_WRITER_BUFFER_SIZE,
+      DEBUG_ENABLED.key,
+      BENCHMARK_SAVE_DIR.key,
+      COLUMNAR_TASK_OFFHEAP_SIZE_IN_BYTES.key,
+      COLUMNAR_MAX_BATCH_SIZE.key,
+      SHUFFLE_WRITER_BUFFER_SIZE.key,
       SQLConf.SESSION_LOCAL_TIMEZONE.key,
-      GLUTEN_DEFAULT_SESSION_TIMEZONE_KEY,
+      GLUTEN_DEFAULT_SESSION_TIMEZONE.key,
       SQLConf.LEGACY_SIZE_OF_NULL.key,
       SQLConf.LEGACY_TIME_PARSER_POLICY.key,
       "spark.io.compression.codec",
@@ -785,11 +664,13 @@ object GlutenConfig {
 
     // Pass the latest tokens to native
     nativeConfMap.put(
-      GLUTEN_UGI_TOKENS,
+      ReservedKeys.GLUTEN_UGI_TOKENS,
       UserGroupInformation.getCurrentUser.getTokens.asScala
         .map(_.encodeToUrlString)
         .mkString("\u0000"))
-    nativeConfMap.put(GLUTEN_UGI_USERNAME, 
UserGroupInformation.getCurrentUser.getUserName)
+    nativeConfMap.put(
+      ReservedKeys.GLUTEN_UGI_USERNAME,
+      UserGroupInformation.getCurrentUser.getUserName)
 
     // return
     nativeConfMap
@@ -835,14 +716,14 @@ object GlutenConfig {
     keyWithDefault.forEach(e => nativeConfMap.put(e._1, conf.getOrElse(e._1, 
e._2)))
 
     val keys = Set(
-      GLUTEN_DEBUG_MODE,
+      DEBUG_ENABLED.key,
       // datasource config
       SPARK_SQL_PARQUET_COMPRESSION_CODEC,
       // datasource config end
 
-      GLUTEN_OVERHEAD_SIZE_IN_BYTES_KEY,
-      GLUTEN_OFFHEAP_SIZE_IN_BYTES_KEY,
-      GLUTEN_TASK_OFFHEAP_SIZE_IN_BYTES_KEY,
+      COLUMNAR_OVERHEAD_SIZE_IN_BYTES.key,
+      COLUMNAR_OFFHEAP_SIZE_IN_BYTES.key,
+      COLUMNAR_TASK_OFFHEAP_SIZE_IN_BYTES.key,
       SPARK_OFFHEAP_ENABLED,
       SESSION_LOCAL_TIMEZONE.key,
       DECIMAL_OPERATIONS_ALLOW_PREC_LOSS.key,
@@ -876,12 +757,116 @@ object GlutenConfig {
   }
 
   val GLUTEN_ENABLED =
-    buildConf(GLUTEN_ENABLED_KEY)
+    buildConf("spark.gluten.enabled")
       .internal()
       .doc("Whether to enable gluten. Default value is true. Just an 
experimental property." +
         " Recommend to enable/disable Gluten through the setting for 
spark.plugins.")
       .booleanConf
-      .createWithDefault(GLUTEN_ENABLED_BY_DEFAULT)
+      .createWithDefault(true)
+
+  val GLUTEN_UI_ENABLED = buildStaticConf("spark.gluten.ui.enabled")
+    .doc(
+      "Whether to enable the Gluten web UI. If true, attach the Gluten UI page to the Spark web UI.")
+        "to the Spark web UI.")
+    .booleanConf
+    .createWithDefault(true)
+
+  val GLUTEN_DEFAULT_SESSION_TIMEZONE = 
buildConf("spark.gluten.sql.session.timeZone.default")
+    .doc(
+      "Holds the default session timezone; Gluten only uses it when " +
+        "`spark.sql.session.timeZone` is not set.")
+    .stringConf
+    .createWithDefaultString(SQLConf.SESSION_LOCAL_TIMEZONE.defaultValueString)
+
+  val GLUTEN_LOAD_LIB_OS =
+    buildConf("spark.gluten.loadLibOS")
+      .doc("The shared library loader's OS name.")
+      .stringConf
+      .createOptional
+
+  val GLUTEN_LOAD_LIB_OS_VERSION =
+    buildConf("spark.gluten.loadLibOSVersion")
+      .doc("The shared library loader's OS version.")
+      .stringConf
+      .createOptional
+
+  val GLUTEN_LOAD_LIB_FROM_JAR =
+    buildConf("spark.gluten.loadLibFromJar")
+      .doc("Whether to load shared libraries from jars.")
+      .booleanConf
+      .createWithDefault(false)
+
+  val GLUTEN_RESOURCE_RELATION_EXPIRED_TIME =
+    buildConf("spark.gluten.execution.resource.expired.time")
+      .doc("Expiration time, in seconds, of cached execution-to-resource relations.")
+      .intConf
+      .createWithDefault(86400)
+
+  val GLUTEN_SUPPORTED_HIVE_UDFS = 
buildConf("spark.gluten.supported.hive.udfs")
+    .doc("Supported hive udf names.")
+    .stringConf
+    .createWithDefault("")
+
+  val GLUTEN_SUPPORTED_PYTHON_UDFS = 
buildConf("spark.gluten.supported.python.udfs")
+    .doc("Supported python udf names.")
+    .stringConf
+    .createWithDefault("")
+
+  val GLUTEN_SUPPORTED_SCALA_UDFS = 
buildConf("spark.gluten.supported.scala.udfs")
+    .doc("Supported scala udf names.")
+    .stringConf
+    .createWithDefault("")
+
+  val GLUTEN_SOFT_AFFINITY_ENABLED =
+    buildConf("spark.gluten.soft-affinity.enabled")
+      .doc("Whether to enable Soft Affinity scheduling.")
+      .booleanConf
+      .createWithDefault(false)
+
+  val GLUTEN_SOFT_AFFINITY_REPLICATIONS_NUM =
+    buildConf("spark.gluten.soft-affinity.replications.num")
+      .doc(
+        "The number of replications for scheduling to the target executors per file.")
+      .intConf
+      .createWithDefault(2)
+
+  val GLUTEN_SOFT_AFFINITY_MIN_TARGET_HOSTS =
+    buildConf("spark.gluten.soft-affinity.min.target-hosts")
+      .doc(
+        "For files on HDFS, if target hosts already exist, prefer scheduling to the " +
+          "original target hosts.")
+      .intConf
+      .createWithDefault(1)
+
+  val GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_DETECT_ENABLED =
+    buildConf("spark.gluten.soft-affinity.duplicateReadingDetect.enabled")
+      .doc("Whether to enable Soft Affinity duplicate reading detection.")
+      .booleanConf
+      .createWithDefault(false)
+
+  val GLUTEN_SOFT_AFFINITY_DUPLICATE_READING_MAX_CACHE_ITEMS =
+    buildConf("spark.gluten.soft-affinity.duplicateReading.maxCacheItems")
+      .doc("Max number of cached items for Soft Affinity duplicate reading detection.")
+      .intConf
+      .createWithDefault(10000)
+
+  val GLUTEN_LIB_NAME =
+    buildConf("spark.gluten.sql.columnar.libname")
+      .doc("The gluten library name.")
+      .stringConf
+      .createWithDefault("gluten")
+
+  val GLUTEN_LIB_PATH =
+    buildConf("spark.gluten.sql.columnar.libpath")
+      .doc("The gluten library path.")
+      .stringConf
+      .createWithDefault("")
+
+  val GLUTEN_EXECUTOR_LIB_PATH =
+    buildConf("spark.gluten.sql.columnar.executor.libpath")
+      .doc("The gluten executor library path.")
+      .stringConf
+      .createWithDefault("")
 
   // FIXME the option currently controls both JVM and native validation 
against a Substrait plan.
   val NATIVE_VALIDATION_ENABLED =
@@ -1153,13 +1138,6 @@ object GlutenConfig {
       .stringConf
       .createOptional
 
-  val COLUMNAR_BROADCAST_CACHE_TIMEOUT =
-    buildConf("spark.sql.columnar.sort.broadcast.cache.timeout")
-      .internal()
-      .doc("Deprecated")
-      .intConf
-      .createWithDefault(-1)
-
   val COLUMNAR_SHUFFLE_REALLOC_THRESHOLD =
     buildConf("spark.gluten.sql.columnar.shuffle.realloc.threshold")
       .internal()
@@ -1181,7 +1159,7 @@ object GlutenConfig {
       .createOptional
 
   val COLUMNAR_SHUFFLE_CODEC_BACKEND =
-    buildConf(GlutenConfig.GLUTEN_SHUFFLE_CODEC_BACKEND)
+    buildConf("spark.gluten.sql.columnar.shuffle.codecBackend")
       .internal()
       .stringConf
       .transform(_.toLowerCase(Locale.ROOT))
@@ -1205,24 +1183,24 @@ object GlutenConfig {
       .createWithDefault(100)
 
   val SHUFFLE_WRITER_MERGE_THRESHOLD =
-    buildConf(GLUTEN_SHUFFLE_WRITER_MERGE_THRESHOLD)
+    buildConf("spark.gluten.sql.columnar.shuffle.merge.threshold")
       .internal()
       .doubleConf
       .checkValue(v => v >= 0 && v <= 1, "Shuffle writer merge threshold must 
between [0, 1]")
       .createWithDefault(0.25)
 
   val COLUMNAR_SHUFFLE_READER_BUFFER_SIZE =
-    buildConf(GLUTEN_SHUFFLE_READER_BUFFER_SIZE)
+    buildConf("spark.gluten.sql.columnar.shuffle.readerBufferSize")
       .internal()
       .doc("Buffer size in bytes for shuffle reader reading input stream from 
local or remote.")
       .bytesConf(ByteUnit.BYTE)
       .createWithDefaultString("1MB")
 
   val COLUMNAR_MAX_BATCH_SIZE =
-    buildConf(GLUTEN_MAX_BATCH_SIZE_KEY)
+    buildConf("spark.gluten.sql.columnar.maxBatchSize")
       .internal()
       .intConf
-      .checkValue(_ > 0, s"$GLUTEN_MAX_BATCH_SIZE_KEY must be positive.")
+      .checkValue(_ > 0, s"must be positive.")
       .createWithDefault(4096)
 
   val GLUTEN_COLUMNAR_TO_ROW_MEM_THRESHOLD =
@@ -1233,10 +1211,10 @@ object GlutenConfig {
 
   // if not set, use COLUMNAR_MAX_BATCH_SIZE instead
   val SHUFFLE_WRITER_BUFFER_SIZE =
-    buildConf(GLUTEN_SHUFFLE_WRITER_BUFFER_SIZE)
+    buildConf("spark.gluten.shuffleWriter.bufferSize")
       .internal()
       .intConf
-      .checkValue(_ > 0, s"$GLUTEN_SHUFFLE_WRITER_BUFFER_SIZE must be 
positive.")
+      .checkValue(_ > 0, s"must be positive.")
       .createOptional
 
   val COLUMNAR_LIMIT_ENABLED =
@@ -1336,7 +1314,7 @@ object GlutenConfig {
       .createOptional
 
   val NUM_TASK_SLOTS_PER_EXECUTOR =
-    buildConf(GlutenConfig.GLUTEN_NUM_TASK_SLOTS_PER_EXECUTOR_KEY)
+    buildConf("spark.gluten.numTaskSlotsPerExecutor")
       .internal()
       .doc(
         "Must provide default value since non-execution operations " +
@@ -1346,7 +1324,7 @@ object GlutenConfig {
       .createWithDefaultString("-1")
 
   val COLUMNAR_OVERHEAD_SIZE_IN_BYTES =
-    buildConf(GlutenConfig.GLUTEN_OVERHEAD_SIZE_IN_BYTES_KEY)
+    buildConf("spark.gluten.memoryOverhead.size.in.bytes")
       .internal()
       .doc(
         "Must provide default value since non-execution operations " +
@@ -1356,7 +1334,7 @@ object GlutenConfig {
       .createWithDefaultString("0")
 
   val COLUMNAR_OFFHEAP_SIZE_IN_BYTES =
-    buildConf(GlutenConfig.GLUTEN_OFFHEAP_SIZE_IN_BYTES_KEY)
+    buildConf("spark.gluten.memory.offHeap.size.in.bytes")
       .internal()
       .doc(
         "Must provide default value since non-execution operations " +
@@ -1366,7 +1344,7 @@ object GlutenConfig {
       .createWithDefaultString("0")
 
   val COLUMNAR_TASK_OFFHEAP_SIZE_IN_BYTES =
-    buildConf(GlutenConfig.GLUTEN_TASK_OFFHEAP_SIZE_IN_BYTES_KEY)
+    buildConf("spark.gluten.memory.task.offHeap.size.in.bytes")
       .internal()
       .doc(
         "Must provide default value since non-execution operations " +
@@ -1376,7 +1354,7 @@ object GlutenConfig {
       .createWithDefaultString("0")
 
   val COLUMNAR_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES =
-    buildConf(GlutenConfig.GLUTEN_CONSERVATIVE_TASK_OFFHEAP_SIZE_IN_BYTES_KEY)
+    buildConf("spark.gluten.memory.conservative.task.offHeap.size.in.bytes")
       .internal()
       .doc(
         "Must provide default value since non-execution operations " +
@@ -1609,7 +1587,7 @@ object GlutenConfig {
     
buildConf("spark.gluten.sql.columnar.backend.velox.resizeBatches.shuffleInput")
       .internal()
       .doc(s"If true, combine small columnar batches together before sending 
to shuffle. " +
-        s"The default minimum output batch size is equal to 0.8 * 
$GLUTEN_MAX_BATCH_SIZE_KEY")
+        s"The default minimum output batch size is equal to 0.8 * 
${COLUMNAR_MAX_BATCH_SIZE.key}")
       .booleanConf
       .createWithDefault(true)
 
@@ -1693,19 +1671,19 @@ object GlutenConfig {
       .createWithDefault("DEBUG")
 
   val DEBUG_ENABLED =
-    buildConf(GLUTEN_DEBUG_MODE)
+    buildConf("spark.gluten.sql.debug")
       .internal()
       .booleanConf
       .createWithDefault(false)
 
   val DEBUG_KEEP_JNI_WORKSPACE =
-    buildStaticConf(GLUTEN_DEBUG_KEEP_JNI_WORKSPACE)
+    buildStaticConf("spark.gluten.sql.debug.keepJniWorkspace")
       .internal()
       .booleanConf
       .createWithDefault(false)
 
   val DEBUG_KEEP_JNI_WORKSPACE_DIR =
-    buildStaticConf(GLUTEN_DEBUG_KEEP_JNI_WORKSPACE_DIR)
+    buildStaticConf("spark.gluten.sql.debug.keepJniWorkspaceDir")
       .internal()
       .stringConf
       .createWithDefault("/tmp")
@@ -1735,7 +1713,7 @@ object GlutenConfig {
       .createWithDefault("")
 
   val BENCHMARK_SAVE_DIR =
-    buildConf(GLUTEN_SAVE_DIR)
+    buildConf("spark.gluten.saveDir")
       .internal()
       .stringConf
       .createWithDefault("")
@@ -1797,7 +1775,7 @@ object GlutenConfig {
 
   // FIXME: This only works with CH backend.
   val EXTENDED_EXPRESSION_TRAN_CONF =
-    buildConf(GLUTEN_EXTENDED_EXPRESSION_TRAN_CONF)
+    buildConf("spark.gluten.sql.columnar.extended.expressions.transformer")
       .doc("A class for the extended expressions transformer.")
       .stringConf
       .createWithDefaultString("")
@@ -2169,7 +2147,7 @@ object GlutenConfig {
       .createWithDefault(true)
 
   val COST_EVALUATOR_ENABLED =
-    buildStaticConf(GlutenConfig.GLUTEN_COST_EVALUATOR_ENABLED)
+    buildStaticConf("spark.gluten.sql.adaptive.costEvaluator.enabled")
       .internal()
       .doc(
         "If true, use " +
@@ -2177,10 +2155,10 @@ object GlutenConfig {
           "evaluator class, else follow the configuration " +
           "spark.sql.adaptive.customCostEvaluatorClass.")
       .booleanConf
-      .createWithDefault(GLUTEN_COST_EVALUATOR_ENABLED_DEFAULT_VALUE)
+      .createWithDefault(true)
 
   val DYNAMIC_OFFHEAP_SIZING_ENABLED =
-    buildConf(GlutenConfig.GLUTEN_DYNAMIC_OFFHEAP_SIZING_ENABLED)
+    buildConf("spark.gluten.memory.dynamic.offHeap.sizing.enabled")
       .internal()
       .doc(
         "Experimental: When set to true, the offheap config 
(spark.memory.offHeap.size) will " +
@@ -2197,7 +2175,7 @@ object GlutenConfig {
       .createWithDefault(false)
 
   val DYNAMIC_OFFHEAP_SIZING_MEMORY_FRACTION =
-    buildConf(GlutenConfig.GLUTEN_DYNAMIC_OFFHEAP_SIZING_MEMORY_FRACTION)
+    buildConf("spark.gluten.memory.dynamic.offHeap.sizing.memory.fraction")
       .internal()
       .doc(
         "Experimental: Determines the memory fraction used to determine the 
total " +
diff --git 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeExceptionSuite.scala
 b/shims/common/src/main/scala/org/apache/gluten/config/ReservedKeys.scala
similarity index 54%
copy from 
backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeExceptionSuite.scala
copy to shims/common/src/main/scala/org/apache/gluten/config/ReservedKeys.scala
index 3db2684037..6635a8b20a 100644
--- 
a/backends-clickhouse/src/test/scala/org/apache/gluten/execution/GlutenClickHouseNativeExceptionSuite.scala
+++ b/shims/common/src/main/scala/org/apache/gluten/config/ReservedKeys.scala
@@ -14,27 +14,24 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.gluten.execution
+package org.apache.gluten.config
 
-import org.apache.gluten.config.GlutenConfig
-import org.apache.gluten.utils.{TestExceptionUtils, UTSystemParameters}
-
-import org.apache.spark.SparkConf
+/**
+ * ReservedKeys holds configuration constants that are used internally by Gluten and are not
+ * exposed to users.
+ *
+ * TODO: Other internal constant key should be moved here.
+ */
+object ReservedKeys {
 
-class GlutenClickHouseNativeExceptionSuite extends 
GlutenClickHouseWholeStageTransformerSuite {
+  // Tokens of current user, split by `\0`
+  val GLUTEN_UGI_TOKENS = "spark.gluten.ugi.tokens"
 
-  override protected def sparkConf: SparkConf = {
-    super.sparkConf
-      .set(GlutenConfig.GLUTEN_LIB_PATH, UTSystemParameters.clickHouseLibPath)
-  }
+  // Principal of current user
+  val GLUTEN_UGI_USERNAME = "spark.gluten.ugi.username"
 
-  test("native exception caught by jvm") {
-    try {
-      TestExceptionUtils.generateNativeException()
-      assert(false)
-    } catch {
-      case e: Exception =>
-        assert(e.getMessage.contains("test native exception"))
-    }
-  }
+  // Shuffle writer type.
+  val GLUTEN_HASH_SHUFFLE_WRITER = "hash"
+  val GLUTEN_SORT_SHUFFLE_WRITER = "sort"
+  val GLUTEN_RSS_SORT_SHUFFLE_WRITER = "rss_sort"
 }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to