zhuanshenbsj1 commented on code in PR #8394:
URL: https://github.com/apache/hudi/pull/8394#discussion_r1168119189


##########
hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/compact/HoodieFlinkCompactor.java:
##########
@@ -314,6 +314,9 @@ private void compact() throws Exception {
           .setParallelism(1);
 
       env.execute("flink_hudi_compaction_" + String.join(",", 
compactionInstantTimes));
+      if (conf.getBoolean(FlinkOptions.CLEAN_ASYNC_ENABLED) && 
conf.getBoolean(FlinkOptions.CLEAN_OFFLINE_ENABLE)){
+        writeClient.clean();
+      }

Review Comment:
   CleanFunction#open only performs cleaning when 
OptionsResolver.isInsertOverwrite(conf) is true.
   
    ```
    @Override
     public void open(Configuration parameters) throws Exception {
       super.open(parameters);
       if (conf.getBoolean(FlinkOptions.CLEAN_ASYNC_ENABLED)) {
         this.writeClient = FlinkWriteClients.createWriteClient(conf, 
getRuntimeContext());
         this.executor = 
NonThrownExecutor.builder(LOG).waitForTasksFinish(true).build();
   
         if (OptionsResolver.isInsertOverwrite(conf)) {
           String instantTime = HoodieActiveTimeline.createNewInstantTime();
           LOG.info(String.format("exec sync clean with instant time %s...", 
instantTime));
           executor.execute(() -> writeClient.clean(instantTime), "wait for 
sync cleaning finish");
         }
       }
     }
   ```
   
   I think the real cleaning actually happens in CompactionCommitSink#doCommit, and 
it depends on whether the CLEAN_ASYNC_ENABLED configuration is enabled.
   
   ```
     private void doCommit(String instant, Collection<CompactionCommitEvent> 
events) throws IOException {
       List<WriteStatus> statuses = events.stream()
           .map(CompactionCommitEvent::getWriteStatuses)
           .flatMap(Collection::stream)
           .collect(Collectors.toList());
   
       HoodieCommitMetadata metadata = 
CompactHelpers.getInstance().createCompactionMetadata(
           table, instant, HoodieListData.eager(statuses), 
writeClient.getConfig().getSchema());
   
       // commit the compaction
       this.writeClient.commitCompaction(instant, metadata, Option.empty());
   
       // Whether to clean up the old log file when compaction
       if (!conf.getBoolean(FlinkOptions.CLEAN_ASYNC_ENABLED)) {
         this.writeClient.clean();
       }
     }
   ```
   
   The online async clean and the offline clean currently share the same 
configuration, so I added the CLEAN_OFFLINE_ENABLE option to distinguish it from 
CLEAN_ASYNC_ENABLED.
   



##########
hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/sink/clustering/HoodieFlinkClusteringJob.java:
##########
@@ -358,6 +358,9 @@ private void cluster() throws Exception {
           .setParallelism(1);
 
       env.execute("flink_hudi_clustering_" + clusteringInstant.getTimestamp());
+      if (conf.getBoolean(FlinkOptions.CLEAN_ASYNC_ENABLED) && 
conf.getBoolean(FlinkOptions.CLEAN_OFFLINE_ENABLE)){
+        writeClient.clean();
+      }

Review Comment:
   CleanFunction#open only performs cleaning when 
OptionsResolver.isInsertOverwrite(conf) is true.
   
   ```
     @Override
     public void open(Configuration parameters) throws Exception {
       super.open(parameters);
       if (conf.getBoolean(FlinkOptions.CLEAN_ASYNC_ENABLED)) {
         this.writeClient = FlinkWriteClients.createWriteClient(conf, 
getRuntimeContext());
         this.executor = 
NonThrownExecutor.builder(LOG).waitForTasksFinish(true).build();
   
         if (OptionsResolver.isInsertOverwrite(conf)) {
           String instantTime = HoodieActiveTimeline.createNewInstantTime();
           LOG.info(String.format("exec sync clean with instant time %s...", 
instantTime));
           executor.execute(() -> writeClient.clean(instantTime), "wait for 
sync cleaning finish");
         }
       }
     }
   ```
   I think the real cleaning actually happens in ClusteringCommitSink#doCommit, and 
it depends on whether the CLEAN_ASYNC_ENABLED configuration is enabled.
   
   ```
   
     private void doCommit(String instant, HoodieClusteringPlan clusteringPlan, 
List<ClusteringCommitEvent> events) {
       List<WriteStatus> statuses = events.stream()
           .map(ClusteringCommitEvent::getWriteStatuses)
           .flatMap(Collection::stream)
           .collect(Collectors.toList());
   
       HoodieWriteMetadata<List<WriteStatus>> writeMetadata = new 
HoodieWriteMetadata<>();
       writeMetadata.setWriteStatuses(statuses);
       
writeMetadata.setWriteStats(statuses.stream().map(WriteStatus::getStat).collect(Collectors.toList()));
       
writeMetadata.setPartitionToReplaceFileIds(getPartitionToReplacedFileIds(clusteringPlan,
 writeMetadata));
       validateWriteResult(clusteringPlan, instant, writeMetadata);
       if (!writeMetadata.getCommitMetadata().isPresent()) {
         HoodieCommitMetadata commitMetadata = CommitUtils.buildMetadata(
             writeMetadata.getWriteStats().get(),
             writeMetadata.getPartitionToReplaceFileIds(),
             Option.empty(),
             WriteOperationType.CLUSTER,
             this.writeClient.getConfig().getSchema(),
             HoodieTimeline.REPLACE_COMMIT_ACTION);
         writeMetadata.setCommitMetadata(Option.of(commitMetadata));
       }
       // commit the clustering
       this.table.getMetaClient().reloadActiveTimeline();
       this.writeClient.completeTableService(
           TableServiceType.CLUSTER, writeMetadata.getCommitMetadata().get(), 
table, instant);
   
       // whether to clean up the input base parquet files used for clustering
       if (!conf.getBoolean(FlinkOptions.CLEAN_ASYNC_ENABLED)) {
         LOG.info("Running inline clean");
         this.writeClient.clean();
       }
     }
   ```
   The online async clean and the offline clean currently share the same 
configuration, so I added the CLEAN_OFFLINE_ENABLE option to distinguish it from 
CLEAN_ASYNC_ENABLED.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to