yihua commented on code in PR #13092:
URL: https://github.com/apache/hudi/pull/13092#discussion_r2029404853
##########
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/MergeIntoHoodieTableCommand.scala:
##########
@@ -1081,6 +1082,18 @@ object MergeIntoHoodieTableCommand {
parameters.getOrElse(config.key,
config.defaultValue().toString).toBoolean
}.getOrElse(false)
}
+
+  def useCustomMergeMode(parameters: Map[String, String]): Boolean = {
+    val inferredMergeConfigs = HoodieTableConfig.inferCorrectMergingBehavior(
+      RecordMergeMode.getValue(parameters.getOrElse(DataSourceWriteOptions.RECORD_MERGE_MODE.key(), null)),
+      parameters.getOrElse(DataSourceWriteOptions.PAYLOAD_CLASS_NAME.key(), ""),
+      parameters.getOrElse(DataSourceWriteOptions.RECORD_MERGE_STRATEGY_ID.key(), ""),
+      parameters.getOrElse(PRECOMBINE_FIELD.key(), null),
+      HoodieTableVersion.fromVersionCode(parameters.getOrElse(
+        HoodieWriteConfig.WRITE_TABLE_VERSION.key,
+        HoodieTableVersion.current().versionCode().toString).toInt))
+    inferredMergeConfigs.getLeft.equals(RecordMergeMode.CUSTOM)
Review Comment:
No need to infer the merge mode as the merge mode table config is always
available in table version 8.
##########
hudi-spark-datasource/hudi-spark/src/main/scala/org/apache/spark/sql/hudi/command/MergeIntoHoodieTableCommand.scala:
##########
@@ -495,7 +495,8 @@ case class MergeIntoHoodieTableCommand(mergeInto: MergeIntoTable) extends Hoodie
       && updatingActions.nonEmpty
       && (parameters.getOrElse(HoodieWriteConfig.WRITE_TABLE_VERSION.key,
         HoodieTableVersion.current().versionCode().toString).toInt
         >= HoodieTableVersion.EIGHT.versionCode())
-      && !useGlobalIndex(parameters))
+      && !useGlobalIndex(parameters)
Review Comment:
Is this because the global index does not use the file group reader to read
records, causing the failure during tagging when the log blocks are merged with
the base file?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]