This is an automated email from the ASF dual-hosted git repository. yihua pushed a commit to branch release-0.13.0 in repository https://gitbox.apache.org/repos/asf/hudi.git
commit 21e6e89c77bf7a2150c8ece48e29492e3dd60569 Author: Y Ethan Guo <[email protected]> AuthorDate: Sun Feb 5 00:28:20 2023 -0800 [MINOR] Improve configuration configs (#7855) --- .../apache/hudi/config/DynamoDbBasedLockConfig.java | 2 +- .../java/org/apache/hudi/config/HoodieWriteConfig.java | 4 +++- .../org/apache/hudi/common/config/ConfigGroups.java | 18 ++++++++++++++++++ .../org/apache/hudi/configuration/FlinkOptions.java | 4 +++- 4 files changed, 25 insertions(+), 3 deletions(-) diff --git a/hudi-aws/src/main/java/org/apache/hudi/config/DynamoDbBasedLockConfig.java b/hudi-aws/src/main/java/org/apache/hudi/config/DynamoDbBasedLockConfig.java index 89f2515c992..15e81bc90e3 100644 --- a/hudi-aws/src/main/java/org/apache/hudi/config/DynamoDbBasedLockConfig.java +++ b/hudi-aws/src/main/java/org/apache/hudi/config/DynamoDbBasedLockConfig.java @@ -82,7 +82,7 @@ public class DynamoDbBasedLockConfig extends HoodieConfig { .key(DYNAMODB_BASED_LOCK_PROPERTY_PREFIX + "billing_mode") .defaultValue(BillingMode.PAY_PER_REQUEST.name()) .sinceVersion("0.10.0") - .withDocumentation("For DynamoDB based lock provider, by default it is PAY_PER_REQUEST mode"); + .withDocumentation("For DynamoDB based lock provider, by default it is `PAY_PER_REQUEST` mode. 
Alternative is `PROVISIONED`."); public static final ConfigProperty<String> DYNAMODB_LOCK_READ_CAPACITY = ConfigProperty .key(DYNAMODB_BASED_LOCK_PROPERTY_PREFIX + "read_capacity") diff --git a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java index 707364b4b21..0ad33c46e81 100644 --- a/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java +++ b/hudi-client/hudi-client-common/src/main/java/org/apache/hudi/config/HoodieWriteConfig.java @@ -175,7 +175,9 @@ public class HoodieWriteConfig extends HoodieConfig { .key("hoodie.datasource.write.keygenerator.type") .defaultValue(KeyGeneratorType.SIMPLE.name()) .withDocumentation("Easily configure one of the built-in key generators, instead of specifying the key generator class." - + "Currently supports SIMPLE, COMPLEX, TIMESTAMP, CUSTOM, NON_PARTITION, GLOBAL_DELETE"); + + "Currently supports SIMPLE, COMPLEX, TIMESTAMP, CUSTOM, NON_PARTITION, GLOBAL_DELETE. " + + "**Note** This is being actively worked on. Please use " + + "`hoodie.datasource.write.keygenerator.class` instead."); public static final ConfigProperty<String> ROLLBACK_USING_MARKERS_ENABLE = ConfigProperty .key("hoodie.rollback.using.markers") diff --git a/hudi-common/src/main/java/org/apache/hudi/common/config/ConfigGroups.java b/hudi-common/src/main/java/org/apache/hudi/common/config/ConfigGroups.java index 9bd61cba7b8..41110636cf1 100644 --- a/hudi-common/src/main/java/org/apache/hudi/common/config/ConfigGroups.java +++ b/hudi-common/src/main/java/org/apache/hudi/common/config/ConfigGroups.java @@ -29,6 +29,7 @@ public class ConfigGroups { * Config group names. 
*/ public enum Names { + ENVIRONMENT_CONFIG("Environment Config"), SPARK_DATASOURCE("Spark Datasource Configs"), FLINK_SQL("Flink Sql Configs"), WRITE_CLIENT("Write Client Configs"), @@ -79,6 +80,23 @@ public class ConfigGroups { public static String getDescription(Names names) { String description; switch (names) { + case ENVIRONMENT_CONFIG: + description = "Hudi supports passing configurations via a configuration file " + + "`hudi-default.conf` in which each line consists of a key and a value " + + "separated by whitespace or = sign. For example:\n" + + "```\n" + + "hoodie.datasource.hive_sync.mode jdbc\n" + + "hoodie.datasource.hive_sync.jdbcurl jdbc:hive2://localhost:10000\n" + + "hoodie.datasource.hive_sync.support_timestamp false\n" + + "```\n" + + "It helps to have a central configuration file for your common cross " + + "job configurations/tunings, so all the jobs on your cluster can utilize it. " + + "It also works with Spark SQL DML/DDL, and helps avoid having to pass configs " + + "inside the SQL statements.\n\n" + + "By default, Hudi would load the configuration file under `/etc/hudi/conf` " + + "directory. 
You can specify a different configuration directory location by " + "setting the `HUDI_CONF_DIR` environment variable."; break; case SPARK_DATASOURCE: description = "These configs control the Hudi Spark Datasource, " + "providing ability to define keys/partitioning, pick out the write operation, " diff --git a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/configuration/FlinkOptions.java b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/configuration/FlinkOptions.java index 63bb0d365a2..04678947aee 100644 --- a/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/configuration/FlinkOptions.java +++ b/hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/configuration/FlinkOptions.java @@ -438,7 +438,9 @@ public class FlinkOptions extends HoodieConfig { .key(HoodieWriteConfig.KEYGENERATOR_TYPE.key()) .stringType() .defaultValue(KeyGeneratorType.SIMPLE.name()) - .withDescription("Key generator type, that implements will extract the key out of incoming record"); + .withDescription("Key generator type, whose implementation will extract the key out of the incoming record. " + "**Note** This is being actively worked on. Please use " + "`hoodie.datasource.write.keygenerator.class` instead."); public static final String PARTITION_FORMAT_HOUR = "yyyyMMddHH"; public static final String PARTITION_FORMAT_DAY = "yyyyMMdd";
