xushiyan commented on a change in pull request #3936:
URL: https://github.com/apache/hudi/pull/3936#discussion_r745241470



##########
File path: 
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/HoodieOptionConfig.scala
##########
@@ -102,6 +107,8 @@ object HoodieOptionConfig {
 
   private lazy val reverseValueMapping = valueMapping.map(f => f._2 -> f._1)
 
+  def withDefaultSqlOption(options: Map[String, String]): Map[String, String] 
= defaultSqlOption ++ options

Review comment:
       ```suggestion
     def withDefaultSqlOptions(options: Map[String, String]): Map[String, 
String] = defaultSqlOptions ++ options
   ```

##########
File path: 
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/AlterHoodieTableDropPartitionCommand.scala
##########
@@ -104,10 +104,6 @@ extends RunnableCommand {
         PARTITIONPATH_FIELD.key -> tableConfig.getPartitionFieldProp
       )
     }
-
-    val parameters = HoodieWriterUtils.parametersWithWriteDefaults(optParams)
-    val translatedOptions = 
DataSourceWriteOptions.translateSqlOptions(parameters)
-    translatedOptions

Review comment:
       can you clarify the reason why these are no longer needed?

##########
File path: 
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/HoodieOptionConfig.scala
##########
@@ -136,16 +142,19 @@ object HoodieOptionConfig {
     options.map(kv => tableConfigKeyToSqlKey.getOrElse(kv._1, kv._1) -> 
reverseValueMapping.getOrElse(kv._2, kv._2))
   }
 
-  private lazy val defaultTableConfig: Map[String, String] = {
+  private lazy val defaultSqlOption: Map[String, String] = {

Review comment:
       ```suggestion
     private lazy val defaultSqlOptions: Map[String, String] = {
   ```

##########
File path: 
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/hudi/HoodieWriterUtils.scala
##########
@@ -102,4 +106,73 @@ object HoodieWriterUtils {
     properties.putAll(mapAsJavaMap(parameters))
     new HoodieConfig(properties)
   }
+
+  def getRealKeyGenerator(hoodieConfig: HoodieConfig): String = {
+    val kg = hoodieConfig.getString(KEYGENERATOR_CLASS_NAME.key())
+    if (classOf[SqlKeyGenerator].getCanonicalName == kg) {
+      hoodieConfig.getString(SqlKeyGenerator.ORIGIN_KEYGEN_CLASS_NAME)
+    } else {
+      kg
+    }
+  }
+
+  // Detects conflicts between new parameters and existing table configurations

Review comment:
       ```suggestion
     /**
      * Detects conflicts between new parameters and existing table 
configurations
      */ 
   ```

##########
File path: 
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/CreateHoodieTableAsSelectCommand.scala
##########
@@ -73,9 +75,10 @@ case class CreateHoodieTableAsSelectCommand(
 
     // Execute the insert query
     try {
+      val tblProperties = table.storage.properties ++ table.properties

Review comment:
       I have seen this pattern repeated in several places. Can we consider 
making a `HoodieCatalogTable` to encapsulate this and other Hudi-specific logic 
inside, e.g. validation, options transformation, etc.?

##########
File path: 
hudi-common/src/main/java/org/apache/hudi/common/model/DefaultHoodieRecordPayload.java
##########
@@ -113,7 +113,7 @@ protected boolean needUpdatingPersistedRecord(IndexedRecord 
currentValue,
     Object persistedOrderingVal = getNestedFieldVal((GenericRecord) 
currentValue,
         
properties.getProperty(HoodiePayloadProps.PAYLOAD_ORDERING_FIELD_PROP_KEY), 
true);
     Comparable incomingOrderingVal = (Comparable) 
getNestedFieldVal((GenericRecord) incomingRecord,
-        
properties.getProperty(HoodiePayloadProps.PAYLOAD_ORDERING_FIELD_PROP_KEY), 
false);
+        
properties.getProperty(HoodiePayloadProps.PAYLOAD_ORDERING_FIELD_PROP_KEY), 
true);

Review comment:
       So we return null if the ordering/precombine key is not found. Shall we 
make its parent class behave in a similar way?




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to