This is an automated email from the ASF dual-hosted git repository.

yihua pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 4b6df31a09f [HUDI-7798] Mark configs included in 0.15.0 release (#11306)
4b6df31a09f is described below

commit 4b6df31a09f486d35ce8951cfed700ecf5510aad
Author: Y Ethan Guo <[email protected]>
AuthorDate: Sat May 25 08:09:43 2024 -0700

    [HUDI-7798] Mark configs included in 0.15.0 release (#11306)
---
 .../hudi/config/GlueCatalogSyncClientConfig.java    | 10 +++++-----
 .../org/apache/hudi/config/HoodieAWSConfig.java     | 21 ++++++++++-----------
 .../org/apache/hudi/config/HoodieCleanConfig.java   |  2 +-
 .../apache/hudi/config/HoodieErrorTableConfig.java  |  2 +-
 .../org/apache/hudi/config/HoodieLockConfig.java    |  2 +-
 .../org/apache/hudi/config/HoodieWriteConfig.java   |  4 ++--
 .../hudi/common/config/HoodieStorageConfig.java     |  2 ++
 .../hudi/config/metrics/HoodieMetricsM3Config.java  | 16 +++++++++++-----
 .../utilities/config/ParquetDFSSourceConfig.java    |  2 +-
 .../config/S3EventsHoodieIncrSourceConfig.java      |  3 +++
 10 files changed, 37 insertions(+), 27 deletions(-)
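
For context: every config touched by this commit is declared through Hudi's
ConfigProperty builder, where sinceVersion()/deprecatedAfter() record the
release in which a config first shipped or was deprecated; this commit corrects
that metadata to 0.15.0. Below is a minimal sketch of the pattern using the
same builder calls edited in the diff (the class name and config key are
hypothetical, for illustration only):

    import org.apache.hudi.common.config.ConfigProperty;

    public class ExampleConfig {
      // Hypothetical config, not part of this commit; shows the builder
      // calls whose metadata this commit updates.
      public static final ConfigProperty<Integer> EXAMPLE_PARALLELISM = ConfigProperty
          .key("hoodie.example.parallelism")  // hypothetical key
          .defaultValue(1)
          .markAdvanced()                     // marks the config as advanced
          .sinceVersion("0.15.0")             // first release shipping this config
          .withDocumentation("Hypothetical config illustrating the builder pattern.");
      // A config retired in a release would instead carry .deprecatedAfter("0.15.0").
    }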

diff --git a/hudi-aws/src/main/java/org/apache/hudi/config/GlueCatalogSyncClientConfig.java b/hudi-aws/src/main/java/org/apache/hudi/config/GlueCatalogSyncClientConfig.java
index 0f6ac76a166..fd198eff626 100644
--- a/hudi-aws/src/main/java/org/apache/hudi/config/GlueCatalogSyncClientConfig.java
+++ b/hudi-aws/src/main/java/org/apache/hudi/config/GlueCatalogSyncClientConfig.java
@@ -50,21 +50,21 @@ public class GlueCatalogSyncClientConfig extends HoodieConfig {
       .defaultValue(1)
       .markAdvanced()
       .withValidValues(IntStream.rangeClosed(1, 10).mapToObj(Integer::toString).toArray(String[]::new))
-      .sinceVersion("1.0.0")
+      .sinceVersion("0.15.0")
       .withDocumentation("Parallelism for listing all partitions(first time sync). Should be in interval [1, 10].");
 
   public static final ConfigProperty<Integer> CHANGED_PARTITIONS_READ_PARALLELISM = ConfigProperty
       .key(GLUE_CLIENT_PROPERTY_PREFIX + "changed_partitions_read_parallelism")
       .defaultValue(1)
       .markAdvanced()
-      .sinceVersion("1.0.0")
+      .sinceVersion("0.15.0")
       .withDocumentation("Parallelism for listing changed partitions(second and subsequent syncs).");
 
   public static final ConfigProperty<Integer> PARTITION_CHANGE_PARALLELISM = ConfigProperty
       .key(GLUE_CLIENT_PROPERTY_PREFIX + "partition_change_parallelism")
       .defaultValue(1)
       .markAdvanced()
-      .sinceVersion("1.0.0")
+      .sinceVersion("0.15.0")
       .withDocumentation("Parallelism for change operations - such as create/update/delete.");
 
   public static final ConfigProperty<Boolean> GLUE_METADATA_FILE_LISTING = ConfigProperty
@@ -77,7 +77,7 @@ public class GlueCatalogSyncClientConfig extends HoodieConfig {
   public static final ConfigProperty<Boolean> META_SYNC_PARTITION_INDEX_FIELDS_ENABLE = ConfigProperty
       .key(GLUE_CLIENT_PROPERTY_PREFIX + "partition_index_fields.enable")
       .defaultValue(false)
-      .sinceVersion("1.0.0")
+      .sinceVersion("0.15.0")
       .withDocumentation("Enable aws glue partition index feature, to speedup partition based query pattern");
 
   public static final ConfigProperty<String> META_SYNC_PARTITION_INDEX_FIELDS = ConfigProperty
@@ -85,7 +85,7 @@ public class GlueCatalogSyncClientConfig extends HoodieConfig {
       .noDefaultValue()
       .withInferFunction(cfg -> Option.ofNullable(cfg.getString(HoodieTableConfig.PARTITION_FIELDS))
           .or(() -> Option.ofNullable(cfg.getString(KeyGeneratorOptions.PARTITIONPATH_FIELD_NAME))))
-      .sinceVersion("1.0.0")
+      .sinceVersion("0.15.0")
       .withDocumentation(String.join(" ", "Specify the partitions fields to index on aws glue. Separate the fields by semicolon.",
           "By default, when the feature is enabled, all the partition will be indexed.",
           "You can create up to three indexes, separate them by comma. Eg: col1;col2;col3,col2,col3"));
diff --git a/hudi-aws/src/main/java/org/apache/hudi/config/HoodieAWSConfig.java b/hudi-aws/src/main/java/org/apache/hudi/config/HoodieAWSConfig.java
index 8a13652e0fa..23031c9f367 100644
--- a/hudi-aws/src/main/java/org/apache/hudi/config/HoodieAWSConfig.java
+++ b/hudi-aws/src/main/java/org/apache/hudi/config/HoodieAWSConfig.java
@@ -36,7 +36,6 @@ import static org.apache.hudi.config.DynamoDbBasedLockConfig.DYNAMODB_LOCK_READ_
 import static org.apache.hudi.config.DynamoDbBasedLockConfig.DYNAMODB_LOCK_REGION;
 import static org.apache.hudi.config.DynamoDbBasedLockConfig.DYNAMODB_LOCK_TABLE_NAME;
 import static org.apache.hudi.config.DynamoDbBasedLockConfig.DYNAMODB_LOCK_WRITE_CAPACITY;
-
 import static org.apache.hudi.config.GlueCatalogSyncClientConfig.GLUE_SKIP_TABLE_ARCHIVE;
 
 /**
@@ -91,18 +90,18 @@ public class HoodieAWSConfig extends HoodieConfig {
           .withDocumentation("External ID use when assuming the AWS Role");
 
   public static final ConfigProperty<String> AWS_GLUE_ENDPOINT = ConfigProperty
-          .key("hoodie.aws.glue.endpoint")
-          .noDefaultValue()
-          .markAdvanced()
-          .sinceVersion("0.14.2")
-          .withDocumentation("Aws glue endpoint");
+      .key("hoodie.aws.glue.endpoint")
+      .noDefaultValue()
+      .markAdvanced()
+      .sinceVersion("0.15.0")
+      .withDocumentation("Aws glue endpoint");
 
   public static final ConfigProperty<String> AWS_GLUE_REGION = ConfigProperty
-          .key("hoodie.aws.glue.region")
-          .noDefaultValue()
-          .markAdvanced()
-          .sinceVersion("0.14.2")
-          .withDocumentation("Aws glue endpoint");
+      .key("hoodie.aws.glue.region")
+      .noDefaultValue()
+      .markAdvanced()
+      .sinceVersion("0.15.0")
+      .withDocumentation("Aws glue endpoint");
 
   private HoodieAWSConfig() {
     super();
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieCleanConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieCleanConfig.java
index 473fd2e7b95..19eb12a17d2 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieCleanConfig.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieCleanConfig.java
@@ -180,7 +180,7 @@ public class HoodieCleanConfig extends HoodieConfig {
       .withAlternatives("hoodie.clean.allow.multiple")
       .markAdvanced()
       .sinceVersion("0.11.0")
-      .deprecatedAfter("1.0.0")
+      .deprecatedAfter("0.15.0")
       .withDocumentation("Allows scheduling/executing multiple cleans by enabling this config. If users prefer to strictly ensure clean requests should be mutually exclusive, "
           + ".i.e. a 2nd clean will not be scheduled if another clean is not yet completed to avoid repeat cleaning of same files, they might want to disable this config.");
 
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieErrorTableConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieErrorTableConfig.java
index 1db8f2c4b5f..9dba4fbc55f 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieErrorTableConfig.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieErrorTableConfig.java
@@ -76,7 +76,7 @@ public class HoodieErrorTableConfig extends HoodieConfig {
   public static final ConfigProperty<Boolean> ERROR_ENABLE_VALIDATE_RECORD_CREATION = ConfigProperty
       .key("hoodie.errortable.validate.recordcreation.enable")
       .defaultValue(true)
-      .sinceVersion("0.14.2")
+      .sinceVersion("0.15.0")
       .withDocumentation("Records that fail to be created due to keygeneration failure or other issues will be sent to the Error Table");
 
   public static final ConfigProperty<String> ERROR_TABLE_WRITE_FAILURE_STRATEGY = ConfigProperty
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieLockConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieLockConfig.java
index 1c51b6db8b3..6e119ebc192 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieLockConfig.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieLockConfig.java
@@ -117,7 +117,7 @@ public class HoodieLockConfig extends HoodieConfig {
   public static final ConfigProperty<Integer> LOCK_HEARTBEAT_INTERVAL_MS = ConfigProperty
       .key(LOCK_HEARTBEAT_INTERVAL_MS_KEY)
       .defaultValue(DEFAULT_LOCK_HEARTBEAT_INTERVAL_MS)
-      .sinceVersion("1.0.0")
+      .sinceVersion("0.15.0")
       .withDocumentation("Heartbeat interval in ms, to send a heartbeat to indicate that hive client holding locks.");
 
   public static final ConfigProperty<String> FILESYSTEM_LOCK_PATH = ConfigProperty
diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
index 2248ce03f7a..be32ad8ac34 100644
--- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
+++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java
@@ -353,14 +353,14 @@ public class HoodieWriteConfig extends HoodieConfig {
       .key("hoodie.write.buffer.record.sampling.rate")
       .defaultValue(String.valueOf(64))
       .markAdvanced()
-      .sinceVersion("1.0.0")
+      .sinceVersion("0.15.0")
       .withDocumentation("Sampling rate of in-memory buffer used to estimate object size. Higher value lead to lower CPU usage.");
 
   public static final ConfigProperty<String> WRITE_BUFFER_RECORD_CACHE_LIMIT = ConfigProperty
       .key("hoodie.write.buffer.record.cache.limit")
       .defaultValue(String.valueOf(128 * 1024))
       .markAdvanced()
-      .sinceVersion("1.0.0")
+      .sinceVersion("0.15.0")
       .withDocumentation("Maximum queue size of in-memory buffer for parallelizing network reads and lake storage writes.");
 
   public static final ConfigProperty<String> WRITE_EXECUTOR_DISRUPTOR_BUFFER_LIMIT_BYTES = ConfigProperty
diff --git a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieStorageConfig.java b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieStorageConfig.java
index a595dcc17de..235754e624b 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieStorageConfig.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/config/HoodieStorageConfig.java
@@ -155,6 +155,8 @@ public class HoodieStorageConfig extends HoodieConfig {
   public static final ConfigProperty<Boolean> PARQUET_WITH_BLOOM_FILTER_ENABLED = ConfigProperty
       .key("hoodie.parquet.bloom.filter.enabled")
       .defaultValue(true)
+      .markAdvanced()
+      .sinceVersion("0.15.0")
       .withDocumentation("Control whether to write bloom filter or not. Default true. "
           + "We can set to false in non bloom index cases for CPU resource saving.");
 
diff --git a/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsM3Config.java b/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsM3Config.java
index cc675eebfbb..493eb0d7456 100644
--- a/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsM3Config.java
+++ b/hudi-common/src/main/java/org/apache/hudi/config/metrics/HoodieMetricsM3Config.java
@@ -18,16 +18,17 @@
 
 package org.apache.hudi.config.metrics;
 
-import static org.apache.hudi.config.metrics.HoodieMetricsConfig.METRIC_PREFIX;
+import org.apache.hudi.common.config.ConfigClassProperty;
+import org.apache.hudi.common.config.ConfigGroups;
+import org.apache.hudi.common.config.ConfigProperty;
+import org.apache.hudi.common.config.HoodieConfig;
 
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
 import java.util.Properties;
-import org.apache.hudi.common.config.ConfigClassProperty;
-import org.apache.hudi.common.config.ConfigGroups;
-import org.apache.hudi.common.config.ConfigProperty;
-import org.apache.hudi.common.config.HoodieConfig;
+
+import static org.apache.hudi.config.metrics.HoodieMetricsConfig.METRIC_PREFIX;
 
 /**
  * Configs for M3 reporter type.
@@ -45,27 +46,32 @@ public class HoodieMetricsM3Config extends HoodieConfig {
   public static final ConfigProperty<String> M3_SERVER_HOST_NAME = ConfigProperty
       .key(M3_PREFIX + ".host")
       .defaultValue("localhost")
+      .sinceVersion("0.15.0")
       .withDocumentation("M3 host to connect to.");
 
   public static final ConfigProperty<Integer> M3_SERVER_PORT_NUM = ConfigProperty
       .key(M3_PREFIX + ".port")
       .defaultValue(9052)
+      .sinceVersion("0.15.0")
       .withDocumentation("M3 port to connect to.");
 
   public static final ConfigProperty<String> M3_TAGS = ConfigProperty
       .key(M3_PREFIX + ".tags")
       .defaultValue("")
+      .sinceVersion("0.15.0")
       .withDocumentation("Optional M3 tags applied to all metrics.");
 
   public static final ConfigProperty<String> M3_ENV = ConfigProperty
       .key(M3_PREFIX + ".env")
       .defaultValue("production")
+      .sinceVersion("0.15.0")
       .withDocumentation("M3 tag to label the environment (defaults to 'production'), "
           + "applied to all metrics.");
 
   public static final ConfigProperty<String> M3_SERVICE = ConfigProperty
       .key(M3_PREFIX + ".service")
       .defaultValue("hoodie")
+      .sinceVersion("0.15.0")
       .withDocumentation("M3 tag to label the service name (defaults to 'hoodie'), "
           + "applied to all metrics.");
 
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/config/ParquetDFSSourceConfig.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/config/ParquetDFSSourceConfig.java
index b3bf5678baf..a8906c9f70b 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/config/ParquetDFSSourceConfig.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/config/ParquetDFSSourceConfig.java
@@ -44,6 +44,6 @@ public class ParquetDFSSourceConfig extends HoodieConfig {
       .defaultValue(false)
       .withAlternatives(DELTA_STREAMER_CONFIG_PREFIX + "source.parquet.dfs.merge_schema.enable")
       .markAdvanced()
-      .sinceVersion("1.0.0")
+      .sinceVersion("0.15.0")
       .withDocumentation("Merge schema across parquet files within a single write");
 }
diff --git a/hudi-utilities/src/main/java/org/apache/hudi/utilities/config/S3EventsHoodieIncrSourceConfig.java b/hudi-utilities/src/main/java/org/apache/hudi/utilities/config/S3EventsHoodieIncrSourceConfig.java
index 23ecb96d795..58a7bc957d3 100644
--- a/hudi-utilities/src/main/java/org/apache/hudi/utilities/config/S3EventsHoodieIncrSourceConfig.java
+++ b/hudi-utilities/src/main/java/org/apache/hudi/utilities/config/S3EventsHoodieIncrSourceConfig.java
@@ -54,6 +54,7 @@ public class S3EventsHoodieIncrSourceConfig extends HoodieConfig {
       .noDefaultValue()
       .withAlternatives(DELTA_STREAMER_CONFIG_PREFIX + "source.s3incr.key.prefix")
       .markAdvanced()
+      .deprecatedAfter("0.15.0")
       .withDocumentation("Control whether to filter the s3 objects starting with this prefix");
 
   public static final ConfigProperty<String> S3_FS_PREFIX = ConfigProperty
@@ -70,6 +71,7 @@ public class S3EventsHoodieIncrSourceConfig extends HoodieConfig {
       .noDefaultValue()
       .withAlternatives(DELTA_STREAMER_CONFIG_PREFIX + "source.s3incr.ignore.key.prefix")
       .markAdvanced()
+      .deprecatedAfter("0.15.0")
       .withDocumentation("Control whether to ignore the s3 objects starting with this prefix");
 
   @Deprecated
@@ -79,6 +81,7 @@ public class S3EventsHoodieIncrSourceConfig extends HoodieConfig {
       .noDefaultValue()
       .withAlternatives(DELTA_STREAMER_CONFIG_PREFIX + "source.s3incr.ignore.key.substring")
       .markAdvanced()
+      .deprecatedAfter("0.15.0")
       .withDocumentation("Control whether to ignore the s3 objects with this substring");
 
   public static final ConfigProperty<String> SPARK_DATASOURCE_OPTIONS = ConfigProperty
