xiarixiaoyao commented on code in PR #6046:
URL: https://github.com/apache/hudi/pull/6046#discussion_r945546375
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/clustering/run/strategy/MultipleSparkJobExecutionStrategy.java:
##########
@@ -131,6 +161,53 @@ public abstract HoodieData<WriteStatus> performClusteringWithRecordsRDD(final Ho
       final Map<String, String> strategyParams, final Schema schema,
       final List<HoodieFileGroupId> fileGroupIdList, final boolean preserveHoodieMetadata);
+  protected HoodieData<WriteStatus> performRowWrite(Dataset<Row> inputRecords, Map<String, String> parameters) {
+ String uuid = UUID.randomUUID().toString();
+ parameters.put(HoodieWriteConfig.BULKINSERT_ROW_IDENTIFY_ID.key(), uuid);
+ try {
+ inputRecords.write()
+ .format("hudi")
+ .options(JavaConverters.mapAsScalaMapConverter(parameters).asScala())
+ .mode(SaveMode.Append)
+ .save(getWriteConfig().getBasePath());
+      List<WriteStatus> writeStatusList = HoodieInternalWriteStatusCoordinator.get().getWriteStatuses(uuid)
+ .stream()
+ .map(internalWriteStatus -> {
+ WriteStatus status = new WriteStatus(
+                internalWriteStatus.isTrackSuccessRecords(), internalWriteStatus.getFailureFraction());
+ status.setFileId(internalWriteStatus.getFileId());
+ status.setTotalRecords(internalWriteStatus.getTotalRecords());
+ status.setPartitionPath(internalWriteStatus.getPartitionPath());
+ status.setStat(internalWriteStatus.getStat());
+ return status;
+ }).collect(Collectors.toList());
+ return getEngineContext().parallelize(writeStatusList);
+ } finally {
+ HoodieInternalWriteStatusCoordinator.get().removeStatuses(uuid);
+ }
+ }
+
+  protected Map<String, String> buildHoodieRowParameters(int numOutputGroups, String instantTime, Map<String, String> strategyParams, boolean preserveHoodieMetadata) {
+ HashMap<String, String> params = new HashMap<>();
+ HoodieWriteConfig writeConfig = getWriteConfig();
+    params.put(HoodieWriteConfig.BULKINSERT_PARALLELISM_VALUE.key(), String.valueOf(numOutputGroups));
+    params.put(HoodieWriteConfig.BULKINSERT_ROW_AUTO_COMMIT.key(), String.valueOf(false));
Review Comment:
Why `false`? The default value of this config is `true`.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]