n3nash commented on a change in pull request #2263:
URL: https://github.com/apache/hudi/pull/2263#discussion_r528095972
##########
File path:
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/SparkRDDWriteClient.java
##########
@@ -296,6 +298,57 @@ protected void completeCompaction(HoodieCommitMetadata
metadata, JavaRDD<WriteSt
return statuses;
}
+ @Override
+ protected HoodieWriteMetadata<JavaRDD<WriteStatus>> cluster(String
clusteringInstant, boolean shouldComplete) {
+ HoodieSparkTable<T> table = HoodieSparkTable.create(config, context);
+ HoodieTimeline pendingClusteringTimeline =
table.getActiveTimeline().filterPendingReplaceTimeline();
+ HoodieInstant inflightInstant =
HoodieTimeline.getReplaceCommitInflightInstant(clusteringInstant);
+ if (pendingClusteringTimeline.containsInstant(inflightInstant)) {
+ rollbackInflightClustering(inflightInstant, table);
+ table.getMetaClient().reloadActiveTimeline();
+ }
+ clusteringTimer = metrics.getClusteringCtx();
+ LOG.info("Starting clustering at " + clusteringInstant);
+ HoodieWriteMetadata<JavaRDD<WriteStatus>> clusteringMetadata =
table.clustering(context, clusteringInstant);
+ JavaRDD<WriteStatus> statuses = clusteringMetadata.getWriteStatuses();
+ if (shouldComplete && clusteringMetadata.getCommitMetadata().isPresent()) {
+ completeClustering((HoodieReplaceCommitMetadata)
clusteringMetadata.getCommitMetadata().get(), statuses, table,
clusteringInstant);
+ }
+ return clusteringMetadata;
+ }
+
+ protected void completeClustering(HoodieReplaceCommitMetadata metadata,
JavaRDD<WriteStatus> writeStatuses,
+ HoodieTable<T, JavaRDD<HoodieRecord<T>>,
JavaRDD<HoodieKey>, JavaRDD<WriteStatus>> table,
+ String clusteringCommitTime) {
+
+ List<HoodieWriteStat> writeStats =
writeStatuses.map(WriteStatus::getStat).collect();
+ if (!writeStatuses.filter(WriteStatus::hasErrors).isEmpty()) {
+ throw new HoodieClusteringException("Clustering failed to write to
files:"
+ +
writeStatuses.filter(WriteStatus::hasErrors).map(WriteStatus::getFileId).collect());
+ }
+ finalizeWrite(table, clusteringCommitTime, writeStats);
+ try {
+ LOG.info("Committing Clustering " + clusteringCommitTime + ". Finished
with result " + metadata);
+ table.getActiveTimeline().transitionReplaceInflightToComplete(
+ HoodieTimeline.getReplaceCommitInflightInstant(clusteringCommitTime),
+ Option.of(metadata.toJsonString().getBytes(StandardCharsets.UTF_8)));
+ } catch (IOException e) {
+ throw new HoodieClusteringException("unable to transition clustering
inflight to complete: " + clusteringCommitTime, e);
+ }
+
+ if (clusteringTimer != null) {
+ long durationInMs = metrics.getDurationInMs(clusteringTimer.stop());
+ try {
+
metrics.updateCommitMetrics(HoodieActiveTimeline.COMMIT_FORMATTER.parse(clusteringCommitTime).getTime(),
+ durationInMs, metadata, HoodieActiveTimeline.COMPACTION_ACTION);
 Review comment:
   Should this be `REPLACE_ACTION` instead of `COMPACTION_ACTION`, since this method completes a clustering (replacecommit) rather than a compaction?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]