This is an automated email from the ASF dual-hosted git repository.

yihua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new d4dcb3d1190 [HUDI-5618] Add `since version` to new configs for 0.13.0 
release (#7751)
d4dcb3d1190 is described below

commit d4dcb3d1190261687ee4f46ba7a2e89d8424aafb
Author: Y Ethan Guo <[email protected]>
AuthorDate: Wed Jan 25 17:28:42 2023 -0800

    [HUDI-5618] Add `since version` to new configs for 0.13.0 release (#7751)
    
    This PR adds the `since version` to new configs for 0.13.0 release.
---
 .../java/org/apache/hudi/config/HoodieCompactionConfig.java  |  3 +++
 .../main/java/org/apache/hudi/config/HoodieIndexConfig.java  |  4 ++++
 .../main/java/org/apache/hudi/config/HoodieWriteConfig.java  |  6 ++++++
 .../org/apache/hudi/config/metrics/HoodieMetricsConfig.java  |  1 +
 .../apache/hudi/common/config/HoodieMetaserverConfig.java    |  6 ++++--
 .../hudi/common/config/HoodieTableServiceManagerConfig.java  | 12 ++++++++++++
 .../java/org/apache/hudi/common/table/HoodieTableConfig.java |  4 ++++
 .../hudi/common/table/view/FileSystemViewStorageConfig.java  |  1 +
 .../src/main/scala/org/apache/hudi/DataSourceOptions.scala   |  7 ++++---
 .../main/java/org/apache/hudi/hive/HiveSyncConfigHolder.java |  7 ++++---
 10 files changed, 43 insertions(+), 8 deletions(-)

diff --git 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java
 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java
index bdb346bd865..e22bf1e43d1 100644
--- 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java
+++ 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieCompactionConfig.java
@@ -64,6 +64,7 @@ public class HoodieCompactionConfig extends HoodieConfig {
   public static final ConfigProperty<String> INLINE_LOG_COMPACT = 
ConfigProperty
       .key("hoodie.log.compaction.inline")
       .defaultValue("false")
+      .sinceVersion("0.13.0")
       .withDocumentation("When set to true, logcompaction service is triggered 
after each write. While being "
           + " simpler operationally, this adds extra latency on the write 
path.");
 
@@ -119,6 +120,7 @@ public class HoodieCompactionConfig extends HoodieConfig {
   public static final ConfigProperty<Long> COMPACTION_LOG_FILE_NUM_THRESHOLD = 
ConfigProperty
       .key("hoodie.compaction.logfile.num.threshold")
       .defaultValue(0L)
+      .sinceVersion("0.13.0")
       .withDocumentation("Only if the log file num is greater than the 
threshold,"
           + " the file group will be compacted.");
 
@@ -182,6 +184,7 @@ public class HoodieCompactionConfig extends HoodieConfig {
   public static final ConfigProperty<String> LOG_COMPACTION_BLOCKS_THRESHOLD = 
ConfigProperty
       .key("hoodie.log.compaction.blocks.threshold")
       .defaultValue("5")
+      .sinceVersion("0.13.0")
       .withDocumentation("Log compaction can be scheduled if the no. of log 
blocks crosses this threshold value. "
           + "This is effective only when log compaction is enabled via " + 
INLINE_LOG_COMPACT.key());
 
diff --git 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java
 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java
index c250e07f33f..87e9ae576db 100644
--- 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java
+++ 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieIndexConfig.java
@@ -272,12 +272,14 @@ public class HoodieIndexConfig extends HoodieConfig {
   public static final ConfigProperty<String> BUCKET_INDEX_MAX_NUM_BUCKETS = 
ConfigProperty
       .key("hoodie.bucket.index.max.num.buckets")
       .noDefaultValue()
+      .sinceVersion("0.13.0")
       .withDocumentation("Only applies if bucket index engine is consistent 
hashing. Determine the upper bound of "
           + "the number of buckets in the hudi table. Bucket resizing cannot 
be done higher than this max limit.");
 
   public static final ConfigProperty<String> BUCKET_INDEX_MIN_NUM_BUCKETS = 
ConfigProperty
       .key("hoodie.bucket.index.min.num.buckets")
       .noDefaultValue()
+      .sinceVersion("0.13.0")
       .withDocumentation("Only applies if bucket index engine is consistent 
hashing. Determine the lower bound of "
           + "the number of buckets in the hudi table. Bucket resizing cannot 
be done lower than this min limit.");
 
@@ -290,12 +292,14 @@ public class HoodieIndexConfig extends HoodieConfig {
   public static final ConfigProperty<Double> BUCKET_SPLIT_THRESHOLD = 
ConfigProperty
       .key("hoodie.bucket.index.split.threshold")
       .defaultValue(2.0)
+      .sinceVersion("0.13.0")
       .withDocumentation("Control if the bucket should be split when using 
consistent hashing bucket index."
           + "Specifically, if a file slice size reaches 
`hoodie.xxxx.max.file.size` * threshold, then split will be carried out.");
 
   public static final ConfigProperty<Double> BUCKET_MERGE_THRESHOLD = 
ConfigProperty
       .key("hoodie.bucket.index.merge.threshold")
       .defaultValue(0.2)
+      .sinceVersion("0.13.0")
       .withDocumentation("Control if buckets should be merged when using 
consistent hashing bucket index"
           + "Specifically, if a file slice size is smaller than 
`hoodie.xxxx.max.file.size` * threshold, then it will be considered"
           + "as a merge candidate.");
diff --git 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
index 98f8df67b9c..fcee1b4b0d6 100644
--- 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
+++ 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
@@ -141,6 +141,7 @@ public class HoodieWriteConfig extends HoodieConfig {
   public static final ConfigProperty<String> RECORD_MERGER_IMPLS = 
ConfigProperty
       .key("hoodie.datasource.write.record.merger.impls")
       .defaultValue(HoodieAvroRecordMerger.class.getName())
+      .sinceVersion("0.13.0")
       .withDocumentation("List of HoodieMerger implementations constituting 
Hudi's merging strategy -- based on the engine used. "
           + "These merger impls will filter by 
hoodie.datasource.write.record.merger.strategy "
           + "Hudi will pick most efficient implementation to perform 
merging/combining of the records (during update, reading MOR table, etc)");
@@ -148,6 +149,7 @@ public class HoodieWriteConfig extends HoodieConfig {
   public static final ConfigProperty<String> RECORD_MERGER_STRATEGY = 
ConfigProperty
       .key("hoodie.datasource.write.record.merger.strategy")
       .defaultValue(HoodieRecordMerger.DEFAULT_MERGER_STRATEGY_UUID)
+      .sinceVersion("0.13.0")
       .withDocumentation("Id of merger strategy. Hudi will pick 
HoodieRecordMerger implementations in 
hoodie.datasource.write.record.merger.impls which has the same merger strategy 
id");
 
   public static final ConfigProperty<String> KEYGENERATOR_CLASS_NAME = 
ConfigProperty
@@ -160,6 +162,7 @@ public class HoodieWriteConfig extends HoodieConfig {
       .key("hoodie.write.executor.type")
       .defaultValue(BOUNDED_IN_MEMORY.name())
       .withValidValues(BOUNDED_IN_MEMORY.name(), DISRUPTOR.name())
+      .sinceVersion("0.13.0")
       .withDocumentation("Set executor which orchestrates concurrent producers 
and consumers communicating through a message queue."
           + "BOUNDED_IN_MEMORY(default): Use LinkedBlockingQueue as a bounded 
in-memory queue, this queue will use extra lock to balance producers and 
consumer"
           + "DISRUPTOR: Use disruptor which a lock free message queue as inner 
message, this queue may gain better writing performance if lock was the 
bottleneck. "
@@ -271,11 +274,13 @@ public class HoodieWriteConfig extends HoodieConfig {
   public static final ConfigProperty<String> WRITE_DISRUPTOR_BUFFER_SIZE = 
ConfigProperty
       .key("hoodie.write.executor.disruptor.buffer.size")
       .defaultValue(String.valueOf(1024))
+      .sinceVersion("0.13.0")
       .withDocumentation("The size of the Disruptor Executor ring buffer, must 
be power of 2");
 
   public static final ConfigProperty<String> WRITE_WAIT_STRATEGY = 
ConfigProperty
       .key("hoodie.write.executor.disruptor.wait.strategy")
       .defaultValue("BLOCKING_WAIT")
+      .sinceVersion("0.13.0")
       .withDocumentation("Strategy employed for making Disruptor Executor wait 
on a cursor. Other options are "
           + "SLEEPING_WAIT, it attempts to be conservative with CPU usage by 
using a simple busy wait loop"
           + "YIELDING_WAIT, it is designed for cases where there is the option 
to burn CPU cycles with the goal of improving latency"
@@ -422,6 +427,7 @@ public class HoodieWriteConfig extends HoodieConfig {
   public static final ConfigProperty<String> 
FAIL_ON_INLINE_TABLE_SERVICE_EXCEPTION = ConfigProperty
       .key("hoodie.fail.writes.on.inline.table.service.exception")
       .defaultValue("true")
+      .sinceVersion("0.13.0")
       .withDocumentation("Table services such as compaction and clustering can 
fail and prevent syncing to "
           + "the metaclient. Set this to true to fail writes when table 
services fail");
 
diff --git 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsConfig.java
 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsConfig.java
index 957b439051a..486f1277ba7 100644
--- 
a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsConfig.java
+++ 
b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsConfig.java
@@ -92,6 +92,7 @@ public class HoodieMetricsConfig extends HoodieConfig {
         }
         return Option.empty();
       })
+      .sinceVersion("0.13.0")
       .withDocumentation("Enable metrics for locking infra. Useful when 
operating in multiwriter mode");
 
   /**
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetaserverConfig.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetaserverConfig.java
index 52b113ba862..2510f600f55 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetaserverConfig.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieMetaserverConfig.java
@@ -41,9 +41,11 @@ public class HoodieMetaserverConfig extends HoodieConfig {
       .sinceVersion("0.13.0")
       .withDocumentation("Enable Hudi metaserver for storing Hudi tables' 
metadata.");
 
-  public static final ConfigProperty<String> DATABASE_NAME = 
HoodieTableConfig.DATABASE_NAME;
+  public static final ConfigProperty<String> DATABASE_NAME = 
HoodieTableConfig.DATABASE_NAME
+      .sinceVersion("0.13.0");
 
-  public static final ConfigProperty<String> TABLE_NAME = 
HoodieTableConfig.NAME;
+  public static final ConfigProperty<String> TABLE_NAME = 
HoodieTableConfig.NAME
+      .sinceVersion("0.13.0");
 
   public static final ConfigProperty<String> METASERVER_URLS = ConfigProperty
       .key(METASERVER_PREFIX + ".uris")
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieTableServiceManagerConfig.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieTableServiceManagerConfig.java
index fa0c8c17251..501b7fc421b 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieTableServiceManagerConfig.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieTableServiceManagerConfig.java
@@ -38,61 +38,73 @@ public class HoodieTableServiceManagerConfig extends 
HoodieConfig {
   public static final ConfigProperty<Boolean> TABLE_SERVICE_MANAGER_ENABLED = 
ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".enabled")
       .defaultValue(false)
+      .sinceVersion("0.13.0")
       .withDocumentation("If true, use table service manager to execute table 
service");
 
   public static final ConfigProperty<String> TABLE_SERVICE_MANAGER_URIS = 
ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".uris")
      .defaultValue("http://localhost:9091")
+      .sinceVersion("0.13.0")
       .withDocumentation("Table service manager URIs (comma-delimited).");
 
   public static final ConfigProperty<String> TABLE_SERVICE_MANAGER_ACTIONS = 
ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".actions")
       .noDefaultValue()
+      .sinceVersion("0.13.0")
       .withDocumentation("The actions deployed on table service manager, such 
as compaction or clean.");
 
   public static final ConfigProperty<String> 
TABLE_SERVICE_MANAGER_DEPLOY_USERNAME = ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".deploy.username")
       .defaultValue("default")
+      .sinceVersion("0.13.0")
       .withDocumentation("The user name for this table to deploy table 
services.");
 
   public static final ConfigProperty<String> 
TABLE_SERVICE_MANAGER_DEPLOY_QUEUE = ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".deploy.queue")
       .defaultValue("default")
+      .sinceVersion("0.13.0")
       .withDocumentation("The queue for this table to deploy table services.");
 
   public static final ConfigProperty<String> 
TABLE_SERVICE_MANAGER_DEPLOY_RESOURCES = ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".deploy.resources")
       .defaultValue("spark:4g,4g")
+      .sinceVersion("0.13.0")
       .withDocumentation("The resources for this table to use for deploying 
table services.");
 
   public static final ConfigProperty<Integer> 
TABLE_SERVICE_MANAGER_DEPLOY_PARALLELISM = ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".deploy.parallelism")
       .defaultValue(100)
+      .sinceVersion("0.13.0")
       .withDocumentation("The parallelism for this table to deploy table 
services.");
 
   public static final ConfigProperty<String> 
TABLE_SERVICE_MANAGER_DEPLOY_EXECUTION_ENGINE = ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".execution.engine")
       .defaultValue("spark")
+      .sinceVersion("0.13.0")
       .withDocumentation("The execution engine to deploy for table service of 
this table, default spark");
 
   public static final ConfigProperty<String> 
TABLE_SERVICE_MANAGER_DEPLOY_EXTRA_PARAMS = ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".deploy.extra.params")
       .noDefaultValue()
+      .sinceVersion("0.13.0")
       .withDocumentation("The extra params to deploy for table service of this 
table, split by ';'");
 
   public static final ConfigProperty<Integer> 
TABLE_SERVICE_MANAGER_TIMEOUT_SEC = ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".connection.timeout.sec")
       .defaultValue(300)
+      .sinceVersion("0.13.0")
       .withDocumentation("Timeout in seconds for connections to table service 
manager.");
 
   public static final ConfigProperty<Integer> TABLE_SERVICE_MANAGER_RETRIES = 
ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".connection.retries")
       .defaultValue(3)
+      .sinceVersion("0.13.0")
       .withDocumentation("Number of retries while opening a connection to 
table service manager");
 
   public static final ConfigProperty<Integer> 
TABLE_SERVICE_MANAGER_RETRY_DELAY_SEC = ConfigProperty
       .key(TABLE_SERVICE_MANAGER_PREFIX + ".connection.retry.delay.sec")
       .defaultValue(1)
+      .sinceVersion("0.13.0")
       .withDocumentation("Number of seconds for the client to wait between 
consecutive connection attempts");
 
   public static HoodieTableServiceManagerConfig.Builder newBuilder() {
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java 
b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
index 90b10c60ed0..e450614e2b6 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
@@ -132,6 +132,7 @@ public class HoodieTableConfig extends HoodieConfig {
   public static final ConfigProperty<Boolean> CDC_ENABLED = ConfigProperty
       .key("hoodie.table.cdc.enabled")
       .defaultValue(false)
+      .sinceVersion("0.13.0")
       .withDocumentation("When enable, persist the change data if necessary, 
and can be queried as a CDC query mode.");
 
   public static final ConfigProperty<String> CDC_SUPPLEMENTAL_LOGGING_MODE = 
ConfigProperty
@@ -141,6 +142,7 @@ public class HoodieTableConfig extends HoodieConfig {
           HoodieCDCSupplementalLoggingMode.OP_KEY.getValue(),
           HoodieCDCSupplementalLoggingMode.WITH_BEFORE.getValue(),
           HoodieCDCSupplementalLoggingMode.WITH_BEFORE_AFTER.getValue())
+      .sinceVersion("0.13.0")
       .withDocumentation("When 'cdc_op_key' persist the 'op' and the record 
key only,"
           + " when 'cdc_data_before' persist the additional 'before' image ,"
           + " and when 'cdc_data_before_after', persist the 'before' and 
'after' at the same time.");
@@ -176,6 +178,7 @@ public class HoodieTableConfig extends HoodieConfig {
   public static final ConfigProperty<String> RECORD_MERGER_STRATEGY = 
ConfigProperty
       .key("hoodie.compaction.record.merger.strategy")
       .defaultValue(HoodieRecordMerger.DEFAULT_MERGER_STRATEGY_UUID)
+      .sinceVersion("0.13.0")
       .withDocumentation("Id of merger strategy. Hudi will pick 
HoodieRecordMerger implementations in 
hoodie.datasource.write.record.merger.impls which has the same merger strategy 
id");
 
   public static final ConfigProperty<String> ARCHIVELOG_FOLDER = ConfigProperty
@@ -261,6 +264,7 @@ public class HoodieTableConfig extends HoodieConfig {
   public static final ConfigProperty<String> SECONDARY_INDEXES_METADATA = 
ConfigProperty
       .key("hoodie.table.secondary.indexes.metadata")
       .noDefaultValue()
+      .sinceVersion("0.13.0")
       .withDocumentation("The metadata of secondary indexes");
 
   private static final String TABLE_CHECKSUM_FORMAT = "%s.%s"; // 
<database_name>.<table_name>
diff --git 
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
 
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
index 2f43fb9b4f3..b015bf745bc 100644
--- 
a/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
+++ 
b/hudi-common/src/main/java/org/apache/hudi/common/table/view/FileSystemViewStorageConfig.java
@@ -88,6 +88,7 @@ public class FileSystemViewStorageConfig extends HoodieConfig 
{
   public static final ConfigProperty<Double> 
SPILLABLE_LOG_COMPACTION_MEM_FRACTION = ConfigProperty
       .key("hoodie.filesystem.view.spillable.log.compaction.mem.fraction")
       .defaultValue(0.8)
+      .sinceVersion("0.13.0")
       .withDocumentation("Fraction of the file system view memory, to be used 
for holding log compaction related metadata.");
 
   public static final ConfigProperty<Double> BOOTSTRAP_BASE_FILE_MEM_FRACTION 
= ConfigProperty
diff --git 
a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DataSourceOptions.scala
 
b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DataSourceOptions.scala
index 5de9cc8a411..9ed04dae626 100644
--- 
a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DataSourceOptions.scala
+++ 
b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DataSourceOptions.scala
@@ -19,12 +19,12 @@ package org.apache.hudi
 
 import org.apache.hudi.DataSourceReadOptions.{QUERY_TYPE, 
QUERY_TYPE_READ_OPTIMIZED_OPT_VAL, QUERY_TYPE_SNAPSHOT_OPT_VAL}
 import org.apache.hudi.HoodieConversionUtils.toScalaOption
-import org.apache.hudi.common.config.{ConfigProperty, 
DFSPropertiesConfiguration, HoodieCommonConfig, HoodieConfig, TypedProperties}
+import org.apache.hudi.common.config._
 import org.apache.hudi.common.fs.ConsistencyGuardConfig
 import org.apache.hudi.common.model.{HoodieTableType, WriteOperationType}
 import org.apache.hudi.common.table.HoodieTableConfig
-import org.apache.hudi.common.util.{Option, StringUtils}
 import org.apache.hudi.common.util.ValidationUtils.checkState
+import org.apache.hudi.common.util.{Option, StringUtils}
 import org.apache.hudi.config.{HoodieClusteringConfig, HoodieWriteConfig}
 import org.apache.hudi.hive.{HiveSyncConfig, HiveSyncConfigHolder, 
HiveSyncTool}
 import org.apache.hudi.keygen.constant.KeyGeneratorOptions
@@ -35,7 +35,6 @@ import org.apache.hudi.util.JFunction
 import org.apache.log4j.LogManager
 import org.apache.spark.sql.execution.datasources.{DataSourceUtils => 
SparkDataSourceUtils}
 
-import java.util.function.{Function => JavaFunction}
 import scala.collection.JavaConverters._
 import scala.language.implicitConversions
 
@@ -68,6 +67,7 @@ object DataSourceReadOptions {
     .key("hoodie.datasource.query.incremental.format")
     .defaultValue(INCREMENTAL_FORMAT_LATEST_STATE_VAL)
     .withValidValues(INCREMENTAL_FORMAT_LATEST_STATE_VAL, 
INCREMENTAL_FORMAT_CDC_VAL)
+    .sinceVersion("0.13.0")
     .withDocumentation("This config is used alone with the 'incremental' query 
type." +
       "When set to 'latest_state', it returns the latest records' values." +
       "When set to 'cdc', it returns the cdc data.")
@@ -99,6 +99,7 @@ object DataSourceReadOptions {
   val START_OFFSET: ConfigProperty[String] = ConfigProperty
     .key("hoodie.datasource.streaming.startOffset")
     .defaultValue("earliest")
+    .sinceVersion("0.13.0")
     .withDocumentation("Start offset to pull data from hoodie streaming 
source. allow earliest, latest, and " +
       "specified start instant time")
 
diff --git 
a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/HiveSyncConfigHolder.java
 
b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/HiveSyncConfigHolder.java
index 5fb5b6a79c6..d9c502981ed 100644
--- 
a/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/HiveSyncConfigHolder.java
+++ 
b/hudi-sync/hudi-hive-sync/src/main/java/org/apache/hudi/hive/HiveSyncConfigHolder.java
@@ -133,7 +133,8 @@ public class HiveSyncConfigHolder {
       .withDocumentation("Whether to sync the table column comments while 
syncing the table.");
 
   public static final ConfigProperty<String> HIVE_SYNC_TABLE_STRATEGY = 
ConfigProperty
-          .key("hoodie.datasource.hive_sync.table.strategy")
-          .defaultValue(HoodieSyncTableStrategy.ALL.name())
-          .withDocumentation("Hive table synchronization strategy. Available 
option: ONLY_RO, ONLY_RT, ALL.");
+      .key("hoodie.datasource.hive_sync.table.strategy")
+      .defaultValue(HoodieSyncTableStrategy.ALL.name())
+      .sinceVersion("0.13.0")
+      .withDocumentation("Hive table synchronization strategy. Available 
option: ONLY_RO, ONLY_RT, ALL.");
 }

Reply via email to