vinothchandar commented on a change in pull request #2833:
URL: https://github.com/apache/hudi/pull/2833#discussion_r637565842



##########
File path: 
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieMetricsPrometheusConfig.java
##########
@@ -77,25 +99,13 @@ public Builder fromProperties(Properties props) {
 
     public HoodieMetricsPrometheusConfig build() {
       HoodieMetricsPrometheusConfig config = new 
HoodieMetricsPrometheusConfig(props);
-      setDefaultOnCondition(props, !props.containsKey(PROMETHEUS_PORT), 
PROMETHEUS_PORT,
-              String.valueOf(DEFAULT_PROMETHEUS_PORT));
-      setDefaultOnCondition(props, !props.containsKey(PUSHGATEWAY_HOST),
-              PUSHGATEWAY_HOST,
-              DEFAULT_PUSHGATEWAY_HOST);
-      setDefaultOnCondition(props, !props.containsKey(PUSHGATEWAY_PORT),
-              PUSHGATEWAY_PORT,
-              String.valueOf(DEFAULT_PUSHGATEWAY_PORT));
-      setDefaultOnCondition(props, 
!props.containsKey(PUSHGATEWAY_REPORT_PERIOD_SECONDS),
-              PUSHGATEWAY_REPORT_PERIOD_SECONDS,
-              String.valueOf(DEFAULT_PUSHGATEWAY_REPORT_PERIOD_SECONDS));
-      setDefaultOnCondition(props, 
!props.containsKey(PUSHGATEWAY_DELETE_ON_SHUTDOWN),
-              PUSHGATEWAY_DELETE_ON_SHUTDOWN,
-              String.valueOf(DEFAULT_PUSHGATEWAY_DELETE_ON_SHUTDOWN));
-      setDefaultOnCondition(props, !props.containsKey(PUSHGATEWAY_JOB_NAME),
-              PUSHGATEWAY_JOB_NAME, DEFAULT_PUSHGATEWAY_JOB_NAME);
-      setDefaultOnCondition(props, 
!props.containsKey(PUSHGATEWAY_RANDOM_JOB_NAME_SUFFIX),
-              PUSHGATEWAY_RANDOM_JOB_NAME_SUFFIX,
-              String.valueOf(DEFAULT_PUSHGATEWAY_RANDOM_JOB_NAME_SUFFIX));
+      setDefaultValue(props, PROMETHEUS_PORT);

Review comment:
       Is there a way to have the `ConfigOption` members be added to a static 
list (or something similar) when they are built? That way we would not require 
someone to make the `setDefaultValue()` calls, i.e.
   
   ```
   class HoodieMetricsPrometheusConfig {
      
      private static final List<ConfigOption<?>> configRegistry();
   
     // this internally adds this to configRegistry
      public static final ConfigOption<Integer> PROMETHEUS_PORT = ConfigOption
         .key(PROMETHEUS_PREFIX + ".port")
         .defaultValue(9090)
         .withVersion("0.6.0")
         .withDescription("");
   
   
     public ...... build() {
        for (ConfigOption<?> cOpt : configRegistry) {
           setDefaultValue(props, cOpt);
         }
     } 
     
   ```
   }
   
   
   

##########
File path: 
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieBootstrapConfig.java
##########
@@ -33,30 +34,63 @@
 /**
  * Bootstrap specific configs.
  */
-public class HoodieBootstrapConfig extends DefaultHoodieConfig {
-
-  public static final String BOOTSTRAP_BASE_PATH_PROP = 
"hoodie.bootstrap.base.path";
-  public static final String BOOTSTRAP_MODE_SELECTOR = 
"hoodie.bootstrap.mode.selector";
-  public static final String FULL_BOOTSTRAP_INPUT_PROVIDER = 
"hoodie.bootstrap.full.input.provider";
-  public static final String DEFAULT_FULL_BOOTSTRAP_INPUT_PROVIDER = 
"org.apache.hudi.bootstrap.SparkParquetBootstrapDataProvider";
-  public static final String BOOTSTRAP_KEYGEN_CLASS = 
"hoodie.bootstrap.keygen.class";
-  public static final String BOOTSTRAP_PARTITION_PATH_TRANSLATOR_CLASS =
-      "hoodie.bootstrap.partitionpath.translator.class";
-  public static final String DEFAULT_BOOTSTRAP_PARTITION_PATH_TRANSLATOR_CLASS 
=
-      IdentityBootstrapPartitionPathTranslator.class.getName();
-
-  public static final String BOOTSTRAP_PARALLELISM = 
"hoodie.bootstrap.parallelism";
-  public static final String DEFAULT_BOOTSTRAP_PARALLELISM = "1500";
-
-  // Used By BootstrapRegexModeSelector class. When a partition path matches 
the regex, the corresponding
-  // mode will be used. Otherwise, the alternative mode will be used.
-  public static final String BOOTSTRAP_MODE_SELECTOR_REGEX = 
"hoodie.bootstrap.mode.selector.regex";
-  public static final String BOOTSTRAP_MODE_SELECTOR_REGEX_MODE = 
"hoodie.bootstrap.mode.selector.regex.mode";
-  public static final String DEFAULT_BOOTSTRAP_MODE_SELECTOR_REGEX = ".*";
-  public static final String DEFAULT_BOOTSTRAP_MODE_SELECTOR_REGEX_MODE = 
BootstrapMode.METADATA_ONLY.name();
-
-  public static final String BOOTSTRAP_INDEX_CLASS_PROP = 
"hoodie.bootstrap.index.class";
-  public static final String DEFAULT_BOOTSTRAP_INDEX_CLASS = 
HFileBootstrapIndex.class.getName();
+public class HoodieBootstrapConfig extends HoodieConfig {
+
+  public static final ConfigOption<String> BOOTSTRAP_BASE_PATH_PROP = 
ConfigOption
+      .key("hoodie.bootstrap.base.path")
+      .noDefaultValue()

Review comment:
       does this mean it's required? (i.e., is an error thrown if it is not set?)

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
##########
@@ -50,52 +52,95 @@
  * @see HoodieTableMetaClient
  * @since 0.3.0
  */
-public class HoodieTableConfig implements Serializable {
+public class HoodieTableConfig extends HoodieConfig implements Serializable {
 
   private static final Logger LOG = 
LogManager.getLogger(HoodieTableConfig.class);
 
   public static final String HOODIE_PROPERTIES_FILE = "hoodie.properties";
-  public static final String HOODIE_TABLE_NAME_PROP_NAME = "hoodie.table.name";
-  public static final String HOODIE_TABLE_TYPE_PROP_NAME = "hoodie.table.type";
-  public static final String HOODIE_TABLE_VERSION_PROP_NAME = 
"hoodie.table.version";
-  public static final String HOODIE_TABLE_PRECOMBINE_FIELD = 
"hoodie.table.precombine.field";
-  public static final String HOODIE_TABLE_PARTITION_COLUMNS = 
"hoodie.table.partition.columns";
-
-  @Deprecated
-  public static final String HOODIE_RO_FILE_FORMAT_PROP_NAME = 
"hoodie.table.ro.file.format";
-  @Deprecated
-  public static final String HOODIE_RT_FILE_FORMAT_PROP_NAME = 
"hoodie.table.rt.file.format";
-  public static final String HOODIE_BASE_FILE_FORMAT_PROP_NAME = 
"hoodie.table.base.file.format";
-  public static final String HOODIE_LOG_FILE_FORMAT_PROP_NAME = 
"hoodie.table.log.file.format";
-  public static final String HOODIE_TIMELINE_LAYOUT_VERSION = 
"hoodie.timeline.layout.version";
-  public static final String HOODIE_PAYLOAD_CLASS_PROP_NAME = 
"hoodie.compaction.payload.class";
-  public static final String HOODIE_ARCHIVELOG_FOLDER_PROP_NAME = 
"hoodie.archivelog.folder";
-  public static final String HOODIE_BOOTSTRAP_INDEX_ENABLE = 
"hoodie.bootstrap.index.enable";
-  public static final String HOODIE_BOOTSTRAP_INDEX_CLASS_PROP_NAME = 
"hoodie.bootstrap.index.class";
-  public static final String HOODIE_BOOTSTRAP_BASE_PATH = 
"hoodie.bootstrap.base.path";
-
-  public static final HoodieTableType DEFAULT_TABLE_TYPE = 
HoodieTableType.COPY_ON_WRITE;
-  public static final HoodieTableVersion DEFAULT_TABLE_VERSION = 
HoodieTableVersion.ZERO;
-  public static final HoodieFileFormat DEFAULT_BASE_FILE_FORMAT = 
HoodieFileFormat.PARQUET;
-  public static final HoodieFileFormat DEFAULT_LOG_FILE_FORMAT = 
HoodieFileFormat.HOODIE_LOG;
-  public static final String DEFAULT_PAYLOAD_CLASS = 
OverwriteWithLatestAvroPayload.class.getName();
-  public static final String NO_OP_BOOTSTRAP_INDEX_CLASS = 
NoOpBootstrapIndex.class.getName();
-  public static final String DEFAULT_BOOTSTRAP_INDEX_CLASS = 
HFileBootstrapIndex.class.getName();
-  public static final String DEFAULT_ARCHIVELOG_FOLDER = "";
 
-  private Properties props;
+  public static final ConfigOption<String> HOODIE_TABLE_NAME_PROP_NAME = 
ConfigOption
+      .key("hoodie.table.name")
+      .noDefaultValue()
+      .withDescription("Table name that will be used for registering with 
Hive. Needs to be same across runs.");
+
+  public static final ConfigOption<HoodieTableType> 
HOODIE_TABLE_TYPE_PROP_NAME = ConfigOption
+      .key("hoodie.table.type")
+      .defaultValue(HoodieTableType.COPY_ON_WRITE)
+      .withDescription("The table type for the underlying data, for this 
write. This can’t change between writes.");
+
+  public static final ConfigOption<HoodieTableVersion> 
HOODIE_TABLE_VERSION_PROP_NAME = ConfigOption
+      .key("hoodie.table.version")
+      .defaultValue(HoodieTableVersion.ZERO)
+      .withDescription("");
+
+  public static final ConfigOption<String> HOODIE_TABLE_PRECOMBINE_FIELD = 
ConfigOption
+      .key("hoodie.table.precombine.field")
+      .noDefaultValue()
+      .withDescription("Field used in preCombining before actual write. When 
two records have the same key value, "
+          + "we will pick the one with the largest value for the precombine 
field, determined by Object.compareTo(..)");
+
+  public static final ConfigOption<String> HOODIE_TABLE_PARTITION_COLUMNS = 
ConfigOption
+      .key("hoodie.table.partition.columns")
+      .noDefaultValue()
+      .withDescription("Partition path field. Value to be used at the 
partitionPath component of HoodieKey. "
+          + "Actual value ontained by invoking .toString()");
+
+  public static final ConfigOption<HoodieFileFormat> 
HOODIE_BASE_FILE_FORMAT_PROP_NAME = ConfigOption
+      .key("hoodie.table.base.file.format")
+      .defaultValue(HoodieFileFormat.PARQUET)
+      .withAlternatives("hoodie.table.ro.file.format")
+      .withDescription("");
+
+  public static final ConfigOption<HoodieFileFormat> 
HOODIE_LOG_FILE_FORMAT_PROP_NAME = ConfigOption

Review comment:
       these should not be called `.._PROP_NAME` anymore, right? We can also 
name these consistently, as commented above.
   

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/config/ConfigOption.java
##########
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.common.config;
+
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.exception.HoodieException;
+
+import java.io.Serializable;
+import java.util.function.Function;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * ConfigOption describes a configuration parameter. It contains the 
configuration
+ * key, deprecated older versions of the key, and an optional default value 
for the configuration,
+ * configuration descriptions and also the an infer mechanism to infer the 
configuration value
+ * based on other configurations.
+ *
+ * @param <T> The type of the default value.
+ */
+public class ConfigOption<T> implements Serializable {
+
+  private final String key;
+
+  private final T defaultValue;
+
+  private final String description;
+
+  private final Option<String> version;
+
+  private final String[] alternatives;

Review comment:
       Should this be a `ConfigOption<?>[] alternatives`? Otherwise, it's hard 
to actually look up any default value for the alternatives etc., to really 
leverage this feature.

##########
File path: 
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DataSourceOptions.scala
##########
@@ -292,99 +296,233 @@ object DataSourceWriteOptions {
    * When set to true, will perform write operations directly using the spark 
native `Row` representation.
    * By default, false (will be enabled as default in a future release)
    */
-  val ENABLE_ROW_WRITER_OPT_KEY = "hoodie.datasource.write.row.writer.enable"
-  val DEFAULT_ENABLE_ROW_WRITER_OPT_VAL = "false"
+  val ENABLE_ROW_WRITER_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.write.row.writer.enable")
+    .defaultValue("false")
+    .withDescription("")
 
   /**
     * Option keys beginning with this prefix, are automatically added to the 
commit/deltacommit metadata.
     * This is useful to store checkpointing information, in a consistent way 
with the hoodie timeline
     */
-  val COMMIT_METADATA_KEYPREFIX_OPT_KEY = 
"hoodie.datasource.write.commitmeta.key.prefix"
-  val DEFAULT_COMMIT_METADATA_KEYPREFIX_OPT_VAL = "_"
+  val COMMIT_METADATA_KEYPREFIX_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.write.commitmeta.key.prefix")
+    .defaultValue("_")
+    .withDescription("Option keys beginning with this prefix, are 
automatically added to the commit/deltacommit metadata. " +
+      "This is useful to store checkpointing information, in a consistent way 
with the hudi timeline")
 
   /**
     * Flag to indicate whether to drop duplicates upon insert.
     * By default insert will accept duplicates, to gain extra performance.
     */
-  val INSERT_DROP_DUPS_OPT_KEY = 
"hoodie.datasource.write.insert.drop.duplicates"
-  val DEFAULT_INSERT_DROP_DUPS_OPT_VAL = "false"
+  val INSERT_DROP_DUPS_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.write.insert.drop.duplicates")
+    .defaultValue("false")
+    .withDescription("If set to true, filters out all duplicate records from 
incoming dataframe, during insert operations.")
 
   /**
     * Flag to indicate how many times streaming job should retry for a failed 
microbatch
     * By default 3
     */
-  val STREAMING_RETRY_CNT_OPT_KEY = 
"hoodie.datasource.write.streaming.retry.count"
-  val DEFAULT_STREAMING_RETRY_CNT_OPT_VAL = "3"
+  val STREAMING_RETRY_CNT_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.write.streaming.retry.count")
+    .defaultValue("3")
+    .withDescription("")
 
   /**
     * Flag to indicate how long (by millisecond) before a retry should issued 
for failed microbatch
     * By default 2000 and it will be doubled by every retry
     */
-  val STREAMING_RETRY_INTERVAL_MS_OPT_KEY = 
"hoodie.datasource.write.streaming.retry.interval.ms"
-  val DEFAULT_STREAMING_RETRY_INTERVAL_MS_OPT_VAL = "2000"
+  val STREAMING_RETRY_INTERVAL_MS_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.write.streaming.retry.interval.ms")
+    .defaultValue("2000")
+    .withDescription("")
 
   /**
     * Flag to indicate whether to ignore any non exception error (e.g. 
writestatus error)
     * within a streaming microbatch
     * By default true (in favor of streaming progressing over data integrity)
     */
-  val STREAMING_IGNORE_FAILED_BATCH_OPT_KEY = 
"hoodie.datasource.write.streaming.ignore.failed.batch"
-  val DEFAULT_STREAMING_IGNORE_FAILED_BATCH_OPT_VAL = "true"
-  val META_SYNC_CLIENT_TOOL_CLASS = "hoodie.meta.sync.client.tool.class"
-  val DEFAULT_META_SYNC_CLIENT_TOOL_CLASS = classOf[HiveSyncTool].getName
+  val STREAMING_IGNORE_FAILED_BATCH_OPT_KEY: ConfigOption[String] = 
ConfigOption
+    .key("hoodie.datasource.write.streaming.ignore.failed.batch")
+    .defaultValue("true")
+    .withDescription("")
+
+  val META_SYNC_CLIENT_TOOL_CLASS: ConfigOption[String] = ConfigOption
+    .key("hoodie.meta.sync.client.tool.class")
+    .defaultValue(classOf[HiveSyncTool].getName)
+    .withDescription("")
 
   // HIVE SYNC SPECIFIC CONFIGS
-  //NOTE: DO NOT USE uppercase for the keys as they are internally 
lower-cased. Using upper-cases causes
+  // NOTE: DO NOT USE uppercase for the keys as they are internally 
lower-cased. Using upper-cases causes
   // unexpected issues with config getting reset
-  val HIVE_SYNC_ENABLED_OPT_KEY = "hoodie.datasource.hive_sync.enable"
-  val META_SYNC_ENABLED_OPT_KEY = "hoodie.datasource.meta.sync.enable"
-  val HIVE_DATABASE_OPT_KEY = "hoodie.datasource.hive_sync.database"
-  val HIVE_TABLE_OPT_KEY = "hoodie.datasource.hive_sync.table"
-  val HIVE_BASE_FILE_FORMAT_OPT_KEY = 
"hoodie.datasource.hive_sync.base_file_format"
-  val HIVE_USER_OPT_KEY = "hoodie.datasource.hive_sync.username"
-  val HIVE_PASS_OPT_KEY = "hoodie.datasource.hive_sync.password"
-  val HIVE_URL_OPT_KEY = "hoodie.datasource.hive_sync.jdbcurl"
-  val HIVE_PARTITION_FIELDS_OPT_KEY = 
"hoodie.datasource.hive_sync.partition_fields"
-  val HIVE_PARTITION_EXTRACTOR_CLASS_OPT_KEY = 
"hoodie.datasource.hive_sync.partition_extractor_class"
-  val HIVE_ASSUME_DATE_PARTITION_OPT_KEY = 
"hoodie.datasource.hive_sync.assume_date_partitioning"
-  val HIVE_USE_PRE_APACHE_INPUT_FORMAT_OPT_KEY = 
"hoodie.datasource.hive_sync.use_pre_apache_input_format"
-  val HIVE_USE_JDBC_OPT_KEY = "hoodie.datasource.hive_sync.use_jdbc"
-  val HIVE_AUTO_CREATE_DATABASE_OPT_KEY = 
"hoodie.datasource.hive_sync.auto_create_database"
-  val HIVE_IGNORE_EXCEPTIONS_OPT_KEY = 
"hoodie.datasource.hive_sync.ignore_exceptions"
-  val HIVE_SKIP_RO_SUFFIX = "hoodie.datasource.hive_sync.skip_ro_suffix"
-  val HIVE_SUPPORT_TIMESTAMP = "hoodie.datasource.hive_sync.support_timestamp"
-  val HIVE_TABLE_PROPERTIES = "hoodie.datasource.hive_sync.table_properties"
-  val HIVE_TABLE_SERDE_PROPERTIES = 
"hoodie.datasource.hive_sync.serde_properties"
-  val HIVE_SYNC_AS_DATA_SOURCE_TABLE = 
"hoodie.datasource.hive_sync.sync_as_datasource"
-
-  // DEFAULT FOR HIVE SPECIFIC CONFIGS
-  val DEFAULT_HIVE_SYNC_ENABLED_OPT_VAL = "false"
-  val DEFAULT_META_SYNC_ENABLED_OPT_VAL = "false"
-  val DEFAULT_HIVE_DATABASE_OPT_VAL = "default"
-  val DEFAULT_HIVE_TABLE_OPT_VAL = "unknown"
-  val DEFAULT_HIVE_BASE_FILE_FORMAT_OPT_VAL = "PARQUET"
-  val DEFAULT_HIVE_USER_OPT_VAL = "hive"
-  val DEFAULT_HIVE_PASS_OPT_VAL = "hive"
-  val DEFAULT_HIVE_URL_OPT_VAL = "jdbc:hive2://localhost:10000"
-  val DEFAULT_HIVE_PARTITION_FIELDS_OPT_VAL = ""
-  val DEFAULT_HIVE_PARTITION_EXTRACTOR_CLASS_OPT_VAL = 
classOf[SlashEncodedDayPartitionValueExtractor].getCanonicalName
-  val DEFAULT_HIVE_ASSUME_DATE_PARTITION_OPT_VAL = "false"
-  val DEFAULT_USE_PRE_APACHE_INPUT_FORMAT_OPT_VAL = "false"
-  val DEFAULT_HIVE_USE_JDBC_OPT_VAL = "true"
-  val DEFAULT_HIVE_AUTO_CREATE_DATABASE_OPT_KEY = "true"
-  val DEFAULT_HIVE_IGNORE_EXCEPTIONS_OPT_KEY = "false"
-  val DEFAULT_HIVE_SKIP_RO_SUFFIX_VAL = "false"
-  val DEFAULT_HIVE_SUPPORT_TIMESTAMP = "false"
-  val DEFAULT_HIVE_SYNC_AS_DATA_SOURCE_TABLE = "true"
+
+  val HIVE_SYNC_ENABLED_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.enable")
+    .defaultValue("false")
+    .withDescription("When set to true, register/sync the table to Apache Hive 
metastore")
+
+  val META_SYNC_ENABLED_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.meta.sync.enable")
+    .defaultValue("false")
+    .withDescription("")
+
+  val HIVE_DATABASE_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.database")
+    .defaultValue("default")
+    .withDescription("database to sync to")
+
+  val HIVE_TABLE_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.table")
+    .defaultValue("unknown")
+    .withDescription("table to sync to")
+
+  val HIVE_BASE_FILE_FORMAT_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.base_file_format")
+    .defaultValue("PARQUET")
+    .withDescription("")
+
+  val HIVE_USER_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.username")
+    .defaultValue("hive")
+    .withDescription("hive user name to use")
+
+  val HIVE_PASS_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.password")
+    .defaultValue("hive")
+    .withDescription("hive password to use")
+
+  val HIVE_URL_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.jdbcurl")
+    .defaultValue("jdbc:hive2://localhost:10000")
+    .withDescription("Hive metastore url")
+
+  val HIVE_PARTITION_FIELDS_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.partition_fields")
+    .defaultValue("")
+    .withDescription("field in the table to use for determining hive partition 
columns.")
+
+  val HIVE_PARTITION_EXTRACTOR_CLASS_OPT_KEY: ConfigOption[String] = 
ConfigOption
+    .key("hoodie.datasource.hive_sync.partition_extractor_class")
+    
.defaultValue(classOf[SlashEncodedDayPartitionValueExtractor].getCanonicalName)
+    .withDescription("")
+
+  val HIVE_ASSUME_DATE_PARTITION_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.assume_date_partitioning")
+    .defaultValue("false")
+    .withDescription("Assume partitioning is yyyy/mm/dd")
+
+  val HIVE_USE_PRE_APACHE_INPUT_FORMAT_OPT_KEY: ConfigOption[String] = 
ConfigOption
+    .key("hoodie.datasource.hive_sync.use_pre_apache_input_format")
+    .defaultValue("false")
+    .withDescription("")
+
+  val HIVE_USE_JDBC_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.use_jdbc")
+    .defaultValue("true")
+    .withDescription("Use JDBC when hive synchronization is enabled")
+
+  val HIVE_AUTO_CREATE_DATABASE_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.auto_create_database")
+    .defaultValue("true")
+    .withDescription("Auto create hive database if does not exists")
+
+  val HIVE_IGNORE_EXCEPTIONS_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.ignore_exceptions")
+    .defaultValue("false")
+    .withDescription("")
+
+  val HIVE_SKIP_RO_SUFFIX: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.skip_ro_suffix")
+    .defaultValue("false")
+    .withDescription("Skip the _ro suffix for Read optimized table, when 
registering")
+
+  val HIVE_SUPPORT_TIMESTAMP: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.support_timestamp")
+    .defaultValue("false")
+    .withDescription("‘INT64’ with original type TIMESTAMP_MICROS is converted 
to hive ‘timestamp’ type. " +
+      "Disabled by default for backward compatibility.")
+
+  val HIVE_TABLE_PROPERTIES: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.table_properties")
+    .noDefaultValue()
+    .withDescription("")
+
+  val HIVE_TABLE_SERDE_PROPERTIES: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.serde_properties")
+    .noDefaultValue()
+    .withDescription("")
+
+  val HIVE_SYNC_AS_DATA_SOURCE_TABLE: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.hive_sync.sync_as_datasource")
+    .defaultValue("true")
+    .withDescription("")
 
   // Async Compaction - Enabled by default for MOR
-  val ASYNC_COMPACT_ENABLE_OPT_KEY = 
"hoodie.datasource.compaction.async.enable"
-  val DEFAULT_ASYNC_COMPACT_ENABLE_OPT_VAL = "true"
+  val ASYNC_COMPACT_ENABLE_OPT_KEY: ConfigOption[String] = ConfigOption
+    .key("hoodie.datasource.compaction.async.enable")
+    .defaultValue("true")
+    .withDescription("")
 
   // Avro Kafka Source configs
-  val KAFKA_AVRO_VALUE_DESERIALIZER = 
"hoodie.deltastreamer.source.kafka.value.deserializer.class"
+  val KAFKA_AVRO_VALUE_DESERIALIZER: ConfigOption[String] = ConfigOption
+    .key("hoodie.deltastreamer.source.kafka.value.deserializer.class")
+    .noDefaultValue()
+    .withDescription("")
 
   // Schema provider class to be set to be used in custom kakfa deserializer
-  val SCHEMA_PROVIDER_CLASS_PROP = "hoodie.deltastreamer.schemaprovider.class"
+  val SCHEMA_PROVIDER_CLASS_PROP: ConfigOption[String] = ConfigOption
+    .key("hoodie.deltastreamer.schemaprovider.class")
+    .noDefaultValue()
+    .withDescription("")
+
+}
+
+object DataSourceOptionsHelper {
+
+  private val log = LogManager.getLogger(DataSourceOptionsHelper.getClass)
+
+  // put all the configs with alternatives here
+  val allConfigsWithAlternatives = List(
+    DataSourceReadOptions.QUERY_TYPE_OPT_KEY,
+    DataSourceWriteOptions.TABLE_TYPE_OPT_KEY,
+    HoodieTableConfig.HOODIE_BASE_FILE_FORMAT_PROP_NAME,
+    HoodieTableConfig.HOODIE_LOG_FILE_FORMAT_PROP_NAME
+  )
 
+  // put all the deprecated configs here
+  val allDeprecatedConfigs: Set[String] = Set(
+    ConsistencyGuardConfig.CONSISTENCY_CHECK_ENABLED_PROP.key
+  )
+
+  // maps the deprecated config name to its latest name
+  val allAlternatives: Map[String, String] = {
+    val alterMap = scala.collection.mutable.Map[String, String]()
+    allConfigsWithAlternatives.foreach(cfg => 
cfg.getAlternatives.foreach(alternative => alterMap(alternative) = cfg.key))
+    alterMap.toMap
+  }
+
+  val viewTypeValueMap: Map[String, String] = Map(
+    DataSourceReadOptions.VIEW_TYPE_READ_OPTIMIZED_OPT_VAL -> 
DataSourceReadOptions.QUERY_TYPE_SNAPSHOT_OPT_VAL,
+    DataSourceReadOptions.VIEW_TYPE_INCREMENTAL_OPT_VAL -> 
DataSourceReadOptions.QUERY_TYPE_INCREMENTAL_OPT_VAL,
+    DataSourceReadOptions.VIEW_TYPE_REALTIME_OPT_VAL -> 
DataSourceReadOptions.QUERY_TYPE_SNAPSHOT_OPT_VAL)
+
+  def translateConfigurations(optParams: Map[String, String]): Map[String, 
String] = {

Review comment:
       ah, you have generalized these. Got it — please ignore my last comment.

##########
File path: 
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieBootstrapConfig.java
##########
@@ -33,30 +34,63 @@
 /**
  * Bootstrap specific configs.
  */
-public class HoodieBootstrapConfig extends DefaultHoodieConfig {
-
-  public static final String BOOTSTRAP_BASE_PATH_PROP = 
"hoodie.bootstrap.base.path";
-  public static final String BOOTSTRAP_MODE_SELECTOR = 
"hoodie.bootstrap.mode.selector";
-  public static final String FULL_BOOTSTRAP_INPUT_PROVIDER = 
"hoodie.bootstrap.full.input.provider";
-  public static final String DEFAULT_FULL_BOOTSTRAP_INPUT_PROVIDER = 
"org.apache.hudi.bootstrap.SparkParquetBootstrapDataProvider";
-  public static final String BOOTSTRAP_KEYGEN_CLASS = 
"hoodie.bootstrap.keygen.class";
-  public static final String BOOTSTRAP_PARTITION_PATH_TRANSLATOR_CLASS =
-      "hoodie.bootstrap.partitionpath.translator.class";
-  public static final String DEFAULT_BOOTSTRAP_PARTITION_PATH_TRANSLATOR_CLASS 
=
-      IdentityBootstrapPartitionPathTranslator.class.getName();
-
-  public static final String BOOTSTRAP_PARALLELISM = 
"hoodie.bootstrap.parallelism";
-  public static final String DEFAULT_BOOTSTRAP_PARALLELISM = "1500";
-
-  // Used By BootstrapRegexModeSelector class. When a partition path matches 
the regex, the corresponding
-  // mode will be used. Otherwise, the alternative mode will be used.
-  public static final String BOOTSTRAP_MODE_SELECTOR_REGEX = 
"hoodie.bootstrap.mode.selector.regex";
-  public static final String BOOTSTRAP_MODE_SELECTOR_REGEX_MODE = 
"hoodie.bootstrap.mode.selector.regex.mode";
-  public static final String DEFAULT_BOOTSTRAP_MODE_SELECTOR_REGEX = ".*";
-  public static final String DEFAULT_BOOTSTRAP_MODE_SELECTOR_REGEX_MODE = 
BootstrapMode.METADATA_ONLY.name();
-
-  public static final String BOOTSTRAP_INDEX_CLASS_PROP = 
"hoodie.bootstrap.index.class";
-  public static final String DEFAULT_BOOTSTRAP_INDEX_CLASS = 
HFileBootstrapIndex.class.getName();
+public class HoodieBootstrapConfig extends HoodieConfig {
+
+  public static final ConfigOption<String> BOOTSTRAP_BASE_PATH_PROP = 
ConfigOption
+      .key("hoodie.bootstrap.base.path")
+      .noDefaultValue()
+      .withVersion("0.6.0")

Review comment:
       Can we call this `sinceVersion()`, if it stands for when we introduced 
this?
   Also add a `deprecatedAfter()`, so we can mark the configs we deprecate 
going forward — it'll show up in the docs nicely.
   
   While we are at it, do we need a `Version` class for each release version, 
instead of just using a string? (I am OK either way — leaving it to you.)

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/config/HoodieConfig.java
##########
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.common.config;
+
+import org.apache.hudi.common.util.Option;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * This class deals with {@link org.apache.hudi.common.config.ConfigOption} 
and provides get/set functionalities.
+ */
+public class HoodieConfig implements Serializable {
+
+  private static final Logger LOG = LogManager.getLogger(HoodieConfig.class);
+
+  protected Properties props;
+
+  public HoodieConfig() {
+    this.props = new Properties();
+  }
+
+  public HoodieConfig(Properties props) {
+    this.props = props;
+  }
+
+  public static void setDefaultOnCondition(Properties props, boolean 
condition, HoodieConfig config) {
+    if (condition) {
+      props.putAll(config.getProps());
+    }
+  }
+
+  public static <T> void set(Properties props, ConfigOption<T> cfg, String 
val) {

Review comment:
       `set` is too abstract. Maybe `setValue()`?

##########
File path: 
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieHBaseIndexConfig.java
##########
@@ -18,107 +18,133 @@
 
 package org.apache.hudi.config;
 
-import org.apache.hudi.common.config.DefaultHoodieConfig;
+import org.apache.hudi.common.config.ConfigOption;
+import org.apache.hudi.common.config.HoodieConfig;
 import org.apache.hudi.index.hbase.DefaultHBaseQPSResourceAllocator;
 
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
 import java.util.Properties;
 
-public class HoodieHBaseIndexConfig extends DefaultHoodieConfig {
-
-  public static final String HBASE_ZKQUORUM_PROP = 
"hoodie.index.hbase.zkquorum";
-  public static final String HBASE_ZKPORT_PROP = "hoodie.index.hbase.zkport";
-  public static final String HBASE_TABLENAME_PROP = "hoodie.index.hbase.table";
-  public static final String HBASE_GET_BATCH_SIZE_PROP = 
"hoodie.index.hbase.get.batch.size";
-  public static final String HBASE_ZK_ZNODEPARENT = 
"hoodie.index.hbase.zknode.path";
-  /**
-   * Note that if HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE_PROP is set to true, this 
batch size will not be honored for HBase
-   * Puts.
-   */
-  public static final String HBASE_PUT_BATCH_SIZE_PROP = 
"hoodie.index.hbase.put.batch.size";
-
-  /**
-   * Property to set which implementation of HBase QPS resource allocator to 
be used.
-   */
-  public static final String HBASE_INDEX_QPS_ALLOCATOR_CLASS = 
"hoodie.index.hbase.qps.allocator.class";
-  public static final String DEFAULT_HBASE_INDEX_QPS_ALLOCATOR_CLASS = 
DefaultHBaseQPSResourceAllocator.class.getName();
-  /**
-   * Property to set to enable auto computation of put batch size.
-   */
-  public static final String HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE_PROP = 
"hoodie.index.hbase.put.batch.size.autocompute";
-  public static final String DEFAULT_HBASE_PUT_BATCH_SIZE_AUTO_COMPUTE = 
"false";
-  /**
-   * Property to set the fraction of the global share of QPS that should be 
allocated to this job. Let's say there are 3
-   * jobs which have input size in terms of number of rows required for 
HbaseIndexing as x, 2x, 3x respectively. Then
-   * this fraction for the jobs would be (0.17) 1/6, 0.33 (2/6) and 0.5 (3/6) 
respectively.
-   */
-  public static final String HBASE_QPS_FRACTION_PROP = 
"hoodie.index.hbase.qps.fraction";
-  /**
-   * Property to set maximum QPS allowed per Region Server. This should be 
same across various jobs. This is intended to
-   * limit the aggregate QPS generated across various jobs to an Hbase Region 
Server. It is recommended to set this
-   * value based on global indexing throughput needs and most importantly, how 
much the HBase installation in use is
-   * able to tolerate without Region Servers going down.
-   */
-  public static final String HBASE_MAX_QPS_PER_REGION_SERVER_PROP = 
"hoodie.index.hbase.max.qps.per.region.server";
-  /**
-   * Default batch size, used only for Get, but computed for Put.
-   */
-  public static final int DEFAULT_HBASE_BATCH_SIZE = 100;
-  /**
-   * A low default value.
-   */
-  public static final int DEFAULT_HBASE_MAX_QPS_PER_REGION_SERVER = 1000;
-  /**
-   * Default is 50%, which means a total of 2 jobs can run using HbaseIndex 
without overwhelming Region Servers.
-   */
-  public static final float DEFAULT_HBASE_QPS_FRACTION = 0.5f;
-
-  /**
-   * Property to decide if HBASE_QPS_FRACTION_PROP is dynamically calculated 
based on volume.
-   */
-  public static final String HOODIE_INDEX_COMPUTE_QPS_DYNAMICALLY = 
"hoodie.index.hbase.dynamic_qps";
-  public static final boolean DEFAULT_HOODIE_INDEX_COMPUTE_QPS_DYNAMICALLY = 
false;
-  /**
-   * Min and Max for HBASE_QPS_FRACTION_PROP to stabilize skewed volume 
workloads.
-   */
-  public static final String HBASE_MIN_QPS_FRACTION_PROP = 
"hoodie.index.hbase.min.qps.fraction";
-
-  public static final String HBASE_MAX_QPS_FRACTION_PROP = 
"hoodie.index.hbase.max.qps.fraction";
-
-  /**
-   * Hoodie index desired puts operation time in seconds.
-   */
-  public static final String HOODIE_INDEX_DESIRED_PUTS_TIME_IN_SECS = 
"hoodie.index.hbase.desired_puts_time_in_secs";
-  public static final int DEFAULT_HOODIE_INDEX_DESIRED_PUTS_TIME_IN_SECS = 600;
-  public static final String HBASE_SLEEP_MS_PUT_BATCH_PROP = 
"hoodie.index.hbase.sleep.ms.for.put.batch";
-  public static final String HBASE_SLEEP_MS_GET_BATCH_PROP = 
"hoodie.index.hbase.sleep.ms.for.get.batch";
-  public static final String HOODIE_INDEX_HBASE_ZK_SESSION_TIMEOUT_MS = 
"hoodie.index.hbase.zk.session_timeout_ms";
-  public static final int DEFAULT_ZK_SESSION_TIMEOUT_MS = 60 * 1000;
-  public static final String HOODIE_INDEX_HBASE_ZK_CONNECTION_TIMEOUT_MS =
-      "hoodie.index.hbase.zk.connection_timeout_ms";
-  public static final int DEFAULT_ZK_CONNECTION_TIMEOUT_MS = 15 * 1000;
-  public static final String HBASE_ZK_PATH_QPS_ROOT = 
"hoodie.index.hbase.zkpath.qps_root";
-  public static final String DEFAULT_HBASE_ZK_PATH_QPS_ROOT = "/QPS_ROOT";
-
-  /**
-   * Only applies if index type is Hbase.
-   * <p>
-   * When set to true, an update to a record with a different partition from 
its existing one
-   * will insert the record to the new partition and delete it from the old 
partition.
-   * <p>
-   * When set to false, a record will be updated to the old partition.
-   */
-  public static final String HBASE_INDEX_UPDATE_PARTITION_PATH = 
"hoodie.hbase.index.update.partition.path";
-  public static final Boolean DEFAULT_HBASE_INDEX_UPDATE_PARTITION_PATH = 
false;
-
-  /**
-   * When set to true, the rollback method will delete the last failed task 
index .
-   * The default value is false. Because deleting the index will add extra 
load on the Hbase cluster for each rollback.
-  */
-  public static final String HBASE_INDEX_ROLLBACK_SYNC = 
"hoodie.index.hbase.rollback.sync";
-  public static final Boolean DEFAULT_HBASE_INDEX_ROLLBACK_SYNC = false;
+public class HoodieHBaseIndexConfig extends HoodieConfig {
+
+  public static final ConfigOption<String> HBASE_ZKQUORUM_PROP = ConfigOption

Review comment:
       Just another thought while we are cleaning this up. We are using three 
words here with similar meaning - config, option, property.  Would be good to 
minimize them. 
   Ideally, we can rename `HBASE_ZKQUORUM_PROP` -> `HBASE_ZKQUORUM_OPT` like we 
have for DataSourceOptions. But, users have to do a little more work than adding 
a `.key()`, when upgrading. 
   
   Alternatively, we can rename `ConfigOption` to `ConfigProp` . Wdyt?  Do you 
have a preference either way?

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/config/ConfigOption.java
##########
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.common.config;
+
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.exception.HoodieException;
+
+import java.io.Serializable;
+import java.util.function.Function;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * ConfigOption describes a configuration parameter. It contains the 
configuration
+ * key, deprecated older versions of the key, and an optional default value 
for the configuration,
+ * configuration descriptions and also an infer mechanism to infer the 
configuration value
+ * based on other configurations.
+ *
+ * @param <T> The type of the default value.
+ */
+public class ConfigOption<T> implements Serializable {
+
+  private final String key;
+
+  private final T defaultValue;
+
+  private final String description;

Review comment:
       rename: `doc` ?

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
##########
@@ -255,23 +286,17 @@ public HoodieFileFormat getBaseFileFormat() {
    * @return HoodieFileFormat for the log Storage format
    */
   public HoodieFileFormat getLogFileFormat() {
-    if (props.containsKey(HOODIE_LOG_FILE_FORMAT_PROP_NAME)) {

Review comment:
       I think this code and the one above handle older tables. Can we keep 
them as-is?

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
##########
@@ -50,52 +52,95 @@
  * @see HoodieTableMetaClient
  * @since 0.3.0
  */
-public class HoodieTableConfig implements Serializable {
+public class HoodieTableConfig extends HoodieConfig implements Serializable {
 
   private static final Logger LOG = 
LogManager.getLogger(HoodieTableConfig.class);
 
   public static final String HOODIE_PROPERTIES_FILE = "hoodie.properties";
-  public static final String HOODIE_TABLE_NAME_PROP_NAME = "hoodie.table.name";
-  public static final String HOODIE_TABLE_TYPE_PROP_NAME = "hoodie.table.type";
-  public static final String HOODIE_TABLE_VERSION_PROP_NAME = 
"hoodie.table.version";
-  public static final String HOODIE_TABLE_PRECOMBINE_FIELD = 
"hoodie.table.precombine.field";
-  public static final String HOODIE_TABLE_PARTITION_COLUMNS = 
"hoodie.table.partition.columns";
-
-  @Deprecated
-  public static final String HOODIE_RO_FILE_FORMAT_PROP_NAME = 
"hoodie.table.ro.file.format";
-  @Deprecated
-  public static final String HOODIE_RT_FILE_FORMAT_PROP_NAME = 
"hoodie.table.rt.file.format";
-  public static final String HOODIE_BASE_FILE_FORMAT_PROP_NAME = 
"hoodie.table.base.file.format";
-  public static final String HOODIE_LOG_FILE_FORMAT_PROP_NAME = 
"hoodie.table.log.file.format";
-  public static final String HOODIE_TIMELINE_LAYOUT_VERSION = 
"hoodie.timeline.layout.version";
-  public static final String HOODIE_PAYLOAD_CLASS_PROP_NAME = 
"hoodie.compaction.payload.class";
-  public static final String HOODIE_ARCHIVELOG_FOLDER_PROP_NAME = 
"hoodie.archivelog.folder";
-  public static final String HOODIE_BOOTSTRAP_INDEX_ENABLE = 
"hoodie.bootstrap.index.enable";
-  public static final String HOODIE_BOOTSTRAP_INDEX_CLASS_PROP_NAME = 
"hoodie.bootstrap.index.class";
-  public static final String HOODIE_BOOTSTRAP_BASE_PATH = 
"hoodie.bootstrap.base.path";
-
-  public static final HoodieTableType DEFAULT_TABLE_TYPE = 
HoodieTableType.COPY_ON_WRITE;
-  public static final HoodieTableVersion DEFAULT_TABLE_VERSION = 
HoodieTableVersion.ZERO;
-  public static final HoodieFileFormat DEFAULT_BASE_FILE_FORMAT = 
HoodieFileFormat.PARQUET;
-  public static final HoodieFileFormat DEFAULT_LOG_FILE_FORMAT = 
HoodieFileFormat.HOODIE_LOG;
-  public static final String DEFAULT_PAYLOAD_CLASS = 
OverwriteWithLatestAvroPayload.class.getName();
-  public static final String NO_OP_BOOTSTRAP_INDEX_CLASS = 
NoOpBootstrapIndex.class.getName();
-  public static final String DEFAULT_BOOTSTRAP_INDEX_CLASS = 
HFileBootstrapIndex.class.getName();
-  public static final String DEFAULT_ARCHIVELOG_FOLDER = "";
 
-  private Properties props;
+  public static final ConfigOption<String> HOODIE_TABLE_NAME_PROP_NAME = 
ConfigOption
+      .key("hoodie.table.name")
+      .noDefaultValue()
+      .withDescription("Table name that will be used for registering with 
Hive. Needs to be same across runs.");
+
+  public static final ConfigOption<HoodieTableType> 
HOODIE_TABLE_TYPE_PROP_NAME = ConfigOption
+      .key("hoodie.table.type")
+      .defaultValue(HoodieTableType.COPY_ON_WRITE)
+      .withDescription("The table type for the underlying data, for this 
write. This can’t change between writes.");
+
+  public static final ConfigOption<HoodieTableVersion> 
HOODIE_TABLE_VERSION_PROP_NAME = ConfigOption
+      .key("hoodie.table.version")
+      .defaultValue(HoodieTableVersion.ZERO)
+      .withDescription("");
+
+  public static final ConfigOption<String> HOODIE_TABLE_PRECOMBINE_FIELD = 
ConfigOption
+      .key("hoodie.table.precombine.field")
+      .noDefaultValue()
+      .withDescription("Field used in preCombining before actual write. When 
two records have the same key value, "
+          + "we will pick the one with the largest value for the precombine 
field, determined by Object.compareTo(..)");
+
+  public static final ConfigOption<String> HOODIE_TABLE_PARTITION_COLUMNS = 
ConfigOption
+      .key("hoodie.table.partition.columns")
+      .noDefaultValue()
+      .withDescription("Partition path field. Value to be used at the 
partitionPath component of HoodieKey. "
+          + "Actual value obtained by invoking .toString()");
+
+  public static final ConfigOption<HoodieFileFormat> 
HOODIE_BASE_FILE_FORMAT_PROP_NAME = ConfigOption
+      .key("hoodie.table.base.file.format")
+      .defaultValue(HoodieFileFormat.PARQUET)
+      .withAlternatives("hoodie.table.ro.file.format")
+      .withDescription("");
+
+  public static final ConfigOption<HoodieFileFormat> 
HOODIE_LOG_FILE_FORMAT_PROP_NAME = ConfigOption
+      .key("hoodie.table.log.file.format")
+      .defaultValue(HoodieFileFormat.HOODIE_LOG)
+      .withAlternatives("hoodie.table.rt.file.format")
+      .withDescription("");
+
+  public static final ConfigOption<String> HOODIE_TIMELINE_LAYOUT_VERSION = 
ConfigOption

Review comment:
       and some of these are missing the `_PROP` suffix we have for others. It 
would be good to ensure all of these are consistent.

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
##########
@@ -50,52 +52,95 @@
  * @see HoodieTableMetaClient
  * @since 0.3.0
  */
-public class HoodieTableConfig implements Serializable {
+public class HoodieTableConfig extends HoodieConfig implements Serializable {
 
   private static final Logger LOG = 
LogManager.getLogger(HoodieTableConfig.class);
 
   public static final String HOODIE_PROPERTIES_FILE = "hoodie.properties";
-  public static final String HOODIE_TABLE_NAME_PROP_NAME = "hoodie.table.name";
-  public static final String HOODIE_TABLE_TYPE_PROP_NAME = "hoodie.table.type";
-  public static final String HOODIE_TABLE_VERSION_PROP_NAME = 
"hoodie.table.version";
-  public static final String HOODIE_TABLE_PRECOMBINE_FIELD = 
"hoodie.table.precombine.field";
-  public static final String HOODIE_TABLE_PARTITION_COLUMNS = 
"hoodie.table.partition.columns";
-
-  @Deprecated
-  public static final String HOODIE_RO_FILE_FORMAT_PROP_NAME = 
"hoodie.table.ro.file.format";
-  @Deprecated
-  public static final String HOODIE_RT_FILE_FORMAT_PROP_NAME = 
"hoodie.table.rt.file.format";
-  public static final String HOODIE_BASE_FILE_FORMAT_PROP_NAME = 
"hoodie.table.base.file.format";
-  public static final String HOODIE_LOG_FILE_FORMAT_PROP_NAME = 
"hoodie.table.log.file.format";
-  public static final String HOODIE_TIMELINE_LAYOUT_VERSION = 
"hoodie.timeline.layout.version";
-  public static final String HOODIE_PAYLOAD_CLASS_PROP_NAME = 
"hoodie.compaction.payload.class";
-  public static final String HOODIE_ARCHIVELOG_FOLDER_PROP_NAME = 
"hoodie.archivelog.folder";
-  public static final String HOODIE_BOOTSTRAP_INDEX_ENABLE = 
"hoodie.bootstrap.index.enable";
-  public static final String HOODIE_BOOTSTRAP_INDEX_CLASS_PROP_NAME = 
"hoodie.bootstrap.index.class";
-  public static final String HOODIE_BOOTSTRAP_BASE_PATH = 
"hoodie.bootstrap.base.path";
-
-  public static final HoodieTableType DEFAULT_TABLE_TYPE = 
HoodieTableType.COPY_ON_WRITE;
-  public static final HoodieTableVersion DEFAULT_TABLE_VERSION = 
HoodieTableVersion.ZERO;
-  public static final HoodieFileFormat DEFAULT_BASE_FILE_FORMAT = 
HoodieFileFormat.PARQUET;
-  public static final HoodieFileFormat DEFAULT_LOG_FILE_FORMAT = 
HoodieFileFormat.HOODIE_LOG;
-  public static final String DEFAULT_PAYLOAD_CLASS = 
OverwriteWithLatestAvroPayload.class.getName();
-  public static final String NO_OP_BOOTSTRAP_INDEX_CLASS = 
NoOpBootstrapIndex.class.getName();
-  public static final String DEFAULT_BOOTSTRAP_INDEX_CLASS = 
HFileBootstrapIndex.class.getName();
-  public static final String DEFAULT_ARCHIVELOG_FOLDER = "";
 
-  private Properties props;
+  public static final ConfigOption<String> HOODIE_TABLE_NAME_PROP_NAME = 
ConfigOption
+      .key("hoodie.table.name")
+      .noDefaultValue()
+      .withDescription("Table name that will be used for registering with 
Hive. Needs to be same across runs.");
+
+  public static final ConfigOption<HoodieTableType> 
HOODIE_TABLE_TYPE_PROP_NAME = ConfigOption
+      .key("hoodie.table.type")
+      .defaultValue(HoodieTableType.COPY_ON_WRITE)
+      .withDescription("The table type for the underlying data, for this 
write. This can’t change between writes.");
+
+  public static final ConfigOption<HoodieTableVersion> 
HOODIE_TABLE_VERSION_PROP_NAME = ConfigOption
+      .key("hoodie.table.version")
+      .defaultValue(HoodieTableVersion.ZERO)
+      .withDescription("");
+
+  public static final ConfigOption<String> HOODIE_TABLE_PRECOMBINE_FIELD = 
ConfigOption
+      .key("hoodie.table.precombine.field")
+      .noDefaultValue()
+      .withDescription("Field used in preCombining before actual write. When 
two records have the same key value, "
+          + "we will pick the one with the largest value for the precombine 
field, determined by Object.compareTo(..)");
+
+  public static final ConfigOption<String> HOODIE_TABLE_PARTITION_COLUMNS = 
ConfigOption
+      .key("hoodie.table.partition.columns")
+      .noDefaultValue()
+      .withDescription("Partition path field. Value to be used at the 
partitionPath component of HoodieKey. "
+          + "Actual value obtained by invoking .toString()");
+
+  public static final ConfigOption<HoodieFileFormat> 
HOODIE_BASE_FILE_FORMAT_PROP_NAME = ConfigOption
+      .key("hoodie.table.base.file.format")
+      .defaultValue(HoodieFileFormat.PARQUET)
+      .withAlternatives("hoodie.table.ro.file.format")
+      .withDescription("");
+
+  public static final ConfigOption<HoodieFileFormat> 
HOODIE_LOG_FILE_FORMAT_PROP_NAME = ConfigOption
+      .key("hoodie.table.log.file.format")
+      .defaultValue(HoodieFileFormat.HOODIE_LOG)
+      .withAlternatives("hoodie.table.rt.file.format")
+      .withDescription("");
+
+  public static final ConfigOption<String> HOODIE_TIMELINE_LAYOUT_VERSION = 
ConfigOption
+      .key("hoodie.timeline.layout.version")
+      .noDefaultValue()
+      .withDescription("");

Review comment:
       I am happy to write some of these docs and push a commit. please let me 
know. 

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/config/ConfigOption.java
##########
@@ -0,0 +1,139 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.common.config;
+
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.exception.HoodieException;
+
+import java.io.Serializable;
+import java.util.function.Function;
+import java.util.Map;
+import java.util.Objects;
+
+/**
+ * ConfigOption describes a configuration parameter. It contains the 
configuration
+ * key, deprecated older versions of the key, and an optional default value 
for the configuration,
+ * configuration descriptions and also an infer mechanism to infer the 
configuration value
+ * based on other configurations.
+ *
+ * @param <T> The type of the default value.
+ */
+public class ConfigOption<T> implements Serializable {
+
+  private final String key;
+
+  private final T defaultValue;
+
+  private final String description;
+
+  private final Option<String> version;

Review comment:
       rename: sinceVersion? 

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
##########
@@ -50,52 +52,95 @@
  * @see HoodieTableMetaClient
  * @since 0.3.0
  */
-public class HoodieTableConfig implements Serializable {
+public class HoodieTableConfig extends HoodieConfig implements Serializable {

Review comment:
       thanks for doing this!

##########
File path: 
hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DataSourceOptions.scala
##########
@@ -188,15 +203,6 @@ object DataSourceWriteOptions {
   @Deprecated
   val DEFAULT_STORAGE_TYPE_OPT_VAL = COW_STORAGE_TYPE_OPT_VAL
 
-  def translateStorageTypeToTableType(optParams: Map[String, String]) : 
Map[String, String] = {

Review comment:
       I think we should be able to deprecate these now. but does this need to 
happen in this PR?

##########
File path: 
hudi-common/src/test/java/org/apache/hudi/common/config/TestConfigOption.java
##########
@@ -0,0 +1,61 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.common.config;
+
+import org.junit.jupiter.api.Test;
+
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import static org.junit.jupiter.api.Assertions.assertEquals;
+import static org.junit.jupiter.api.Assertions.assertNull;
+
+public class TestConfigOption {

Review comment:
       can we also test things like alternatives, infer functions?

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/config/HoodieConfig.java
##########
@@ -0,0 +1,137 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.common.config;
+
+import org.apache.hudi.common.util.Option;
+import org.apache.log4j.LogManager;
+import org.apache.log4j.Logger;
+
+import java.io.Serializable;
+import java.util.Arrays;
+import java.util.Map;
+import java.util.Properties;
+
+/**
+ * This class deals with {@link org.apache.hudi.common.config.ConfigOption} 
and provides get/set functionalities.
+ */
+public class HoodieConfig implements Serializable {
+
+  private static final Logger LOG = LogManager.getLogger(HoodieConfig.class);
+
+  protected Properties props;
+
+  public HoodieConfig() {
+    this.props = new Properties();
+  }
+
+  public HoodieConfig(Properties props) {
+    this.props = props;
+  }
+
+  public static void setDefaultOnCondition(Properties props, boolean 
condition, HoodieConfig config) {
+    if (condition) {
+      props.putAll(config.getProps());
+    }
+  }
+
+  public static <T> void set(Properties props, ConfigOption<T> cfg, String 
val) {
+    props.setProperty(cfg.key(), val);
+  }
+
+  public static <T> void setDefaultValue(Properties props, ConfigOption<T> 
configOption) {

Review comment:
       goes well with `setDefaultValue()` ?

##########
File path: 
hudi-utilities/src/main/java/org/apache/hudi/utilities/callback/kafka/HoodieWriteCommitKafkaCallbackConfig.java
##########
@@ -17,32 +17,49 @@
 
 package org.apache.hudi.utilities.callback.kafka;
 
+import org.apache.hudi.common.config.ConfigOption;
+
 import java.util.Properties;
 
-import static 
org.apache.hudi.common.config.DefaultHoodieConfig.setDefaultOnCondition;
+import static org.apache.hudi.common.config.HoodieConfig.setDefaultValue;
 import static 
org.apache.hudi.config.HoodieWriteCommitCallbackConfig.CALLBACK_PREFIX;
 
 /**
  * Kafka write callback related config.
  */
 public class HoodieWriteCommitKafkaCallbackConfig {

Review comment:
       should this be a `HoodieConfig` class?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to