This is an automated email from the ASF dual-hosted git repository.

xushiyan pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hudi.git


The following commit(s) were added to refs/heads/master by this push:
     new 50f29e261ab [HUDI-6083] Fix advanced config marking not taking effect in DataSourceOptions (#8466)
50f29e261ab is described below

commit 50f29e261ab422d84e5d6b2e08e3e7e8b2754d83
Author: Y Ethan Guo <[email protected]>
AuthorDate: Sat Apr 15 03:12:03 2023 -0700

    [HUDI-6083] Fix advanced config marking not taking effect in DataSourceOptions (#8466)
---
 .../hudi/common/table/HoodieTableConfig.java       |  1 +
 .../scala/org/apache/hudi/DataSourceOptions.scala  |  8 ++--
 .../org/apache/hudi/TestDataSourceOptions.scala    | 45 ++++++++++++++++++++++
 3 files changed, 49 insertions(+), 5 deletions(-)

diff --git a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
index c8fbd5fcb7e..824e81005a3 100644
--- a/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
+++ b/hudi-common/src/main/java/org/apache/hudi/common/table/HoodieTableConfig.java
@@ -219,6 +219,7 @@ public class HoodieTableConfig extends HoodieConfig {
  public static final ConfigProperty<Boolean> DROP_PARTITION_COLUMNS = ConfigProperty
      .key("hoodie.datasource.write.drop.partition.columns")
      .defaultValue(false)
+      .markAdvanced()
      .withDocumentation("When set to true, will not write the partition columns into hudi. By default, false.");
 
  public static final ConfigProperty<String> URL_ENCODE_PARTITIONING = KeyGeneratorOptions.URL_ENCODE_PARTITIONING;
diff --git a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DataSourceOptions.scala b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DataSourceOptions.scala
index 5756f14269d..f678c26c6f1 100644
--- a/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DataSourceOptions.scala
+++ b/hudi-spark-datasource/hudi-spark-common/src/main/scala/org/apache/hudi/DataSourceOptions.scala
@@ -198,7 +198,7 @@ object DataSourceReadOptions {
     .markAdvanced()
    .withDocumentation("When doing an incremental query whether we should fall back to full table scans if file does not exist.")
 
-  val SCHEMA_EVOLUTION_ENABLED: ConfigProperty[Boolean] = HoodieCommonConfig.SCHEMA_EVOLUTION_ENABLE
+  val SCHEMA_EVOLUTION_ENABLED: ConfigProperty[java.lang.Boolean] = HoodieCommonConfig.SCHEMA_EVOLUTION_ENABLE
 
   /** @deprecated Use {@link QUERY_TYPE} and its methods instead */
   @Deprecated
@@ -257,8 +257,6 @@ object DataSourceReadOptions {
   */
 object DataSourceWriteOptions {
 
-  import DataSourceOptionsHelper._
-
   val BULK_INSERT_OPERATION_OPT_VAL = WriteOperationType.BULK_INSERT.value
   val INSERT_OPERATION_OPT_VAL = WriteOperationType.INSERT.value
   val UPSERT_OPERATION_OPT_VAL = WriteOperationType.UPSERT.value
@@ -491,7 +489,7 @@ object DataSourceWriteOptions {
     .markAdvanced()
    .withDocumentation("Sync tool class name used to sync to metastore. Defaults to Hive.")
 
-  val RECONCILE_SCHEMA: ConfigProperty[Boolean] = HoodieCommonConfig.RECONCILE_SCHEMA.markAdvanced()
+  val RECONCILE_SCHEMA: ConfigProperty[java.lang.Boolean] = HoodieCommonConfig.RECONCILE_SCHEMA
 
   // HIVE SYNC SPECIFIC CONFIGS
  // NOTE: DO NOT USE uppercase for the keys as they are internally lower-cased. Using upper-cases causes
@@ -582,7 +580,7 @@ object DataSourceWriteOptions {
     .sinceVersion("0.9.0")
    .withDocumentation("This class is used by kafka client to deserialize the records")
 
-  val DROP_PARTITION_COLUMNS: ConfigProperty[Boolean] = HoodieTableConfig.DROP_PARTITION_COLUMNS.markAdvanced()
+  val DROP_PARTITION_COLUMNS: ConfigProperty[java.lang.Boolean] = HoodieTableConfig.DROP_PARTITION_COLUMNS
 
  /** @deprecated Use {@link HIVE_ASSUME_DATE_PARTITION} and its methods instead */
   @Deprecated
diff --git a/hudi-spark-datasource/hudi-spark-common/src/test/scala/org/apache/hudi/TestDataSourceOptions.scala b/hudi-spark-datasource/hudi-spark-common/src/test/scala/org/apache/hudi/TestDataSourceOptions.scala
new file mode 100644
index 00000000000..9ba43044b3d
--- /dev/null
+++ b/hudi-spark-datasource/hudi-spark-common/src/test/scala/org/apache/hudi/TestDataSourceOptions.scala
@@ -0,0 +1,45 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied.  See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.hudi
+
+import org.apache.hudi.common.config.HoodieCommonConfig
+import org.apache.hudi.common.table.HoodieTableConfig
+import org.junit.jupiter.api.Assertions.{assertEquals, assertTrue}
+import org.junit.jupiter.api.Test
+
+class TestDataSourceOptions {
+  @Test
+  def testAdvancedConfigs(): Unit = {
+    assertTrue(DataSourceReadOptions.SCHEMA_EVOLUTION_ENABLED.isAdvanced)
+    assertEquals(
+      HoodieCommonConfig.SCHEMA_EVOLUTION_ENABLE.defaultValue(),
+      DataSourceReadOptions.SCHEMA_EVOLUTION_ENABLED.defaultValue())
+
+    assertTrue(DataSourceWriteOptions.RECONCILE_SCHEMA.isAdvanced)
+    assertEquals(
+      HoodieCommonConfig.RECONCILE_SCHEMA.defaultValue(),
+      DataSourceWriteOptions.RECONCILE_SCHEMA.defaultValue())
+
+    assertTrue(DataSourceWriteOptions.DROP_PARTITION_COLUMNS.isAdvanced)
+    assertEquals(
+      HoodieTableConfig.DROP_PARTITION_COLUMNS.defaultValue(),
+      DataSourceWriteOptions.DROP_PARTITION_COLUMNS.defaultValue())
+  }
+}

Reply via email to