satishkotha commented on a change in pull request #1312: [HUDI-571] Add 
"compactions show archived" command to CLI
URL: https://github.com/apache/incubator-hudi/pull/1312#discussion_r378475212
 
 

 ##########
 File path: 
hudi-cli/src/main/java/org/apache/hudi/cli/commands/CompactionCommand.java
 ##########
 @@ -95,51 +101,9 @@ public String compactionsAll(
       throws IOException {
     HoodieTableMetaClient client = checkAndGetMetaClient();
     HoodieActiveTimeline activeTimeline = client.getActiveTimeline();
-    HoodieTimeline timeline = activeTimeline.getCommitsAndCompactionTimeline();
-    HoodieTimeline commitTimeline = 
activeTimeline.getCommitTimeline().filterCompletedInstants();
-    Set<String> committed = 
commitTimeline.getInstants().map(HoodieInstant::getTimestamp).collect(Collectors.toSet());
-
-    List<HoodieInstant> instants = 
timeline.getReverseOrderedInstants().collect(Collectors.toList());
-    List<Comparable[]> rows = new ArrayList<>();
-    for (HoodieInstant instant : instants) {
-      HoodieCompactionPlan compactionPlan = null;
-      if (!HoodieTimeline.COMPACTION_ACTION.equals(instant.getAction())) {
-        try {
-          // This could be a completed compaction. Assume a compaction request 
file is present but skip if fails
-          compactionPlan = AvroUtils.deserializeCompactionPlan(
-              activeTimeline.readCompactionPlanAsBytes(
-                  
HoodieTimeline.getCompactionRequestedInstant(instant.getTimestamp())).get());
-        } catch (HoodieIOException ioe) {
-          // SKIP
-        }
-      } else {
-        compactionPlan = 
AvroUtils.deserializeCompactionPlan(activeTimeline.readCompactionPlanAsBytes(
-            
HoodieTimeline.getCompactionRequestedInstant(instant.getTimestamp())).get());
-      }
-
-      if (null != compactionPlan) {
-        State state = instant.getState();
-        if (committed.contains(instant.getTimestamp())) {
-          state = State.COMPLETED;
-        }
-        if (includeExtraMetadata) {
-          rows.add(new Comparable[] {instant.getTimestamp(), state.toString(),
-              compactionPlan.getOperations() == null ? 0 : 
compactionPlan.getOperations().size(),
-              compactionPlan.getExtraMetadata().toString()});
-        } else {
-          rows.add(new Comparable[] {instant.getTimestamp(), state.toString(),
-              compactionPlan.getOperations() == null ? 0 : 
compactionPlan.getOperations().size()});
-        }
-      }
-    }
-
-    Map<String, Function<Object, String>> fieldNameToConverterMap = new 
HashMap<>();
-    TableHeader header = new TableHeader().addTableHeaderField("Compaction 
Instant Time").addTableHeaderField("State")
-        .addTableHeaderField("Total FileIds to be Compacted");
-    if (includeExtraMetadata) {
-      header = header.addTableHeaderField("Extra Metadata");
-    }
-    return HoodiePrintHelper.print(header, fieldNameToConverterMap, 
sortByField, descending, limit, headerOnly, rows);
+    return printAllCompactions(activeTimeline,
+            compactionPlanReader(this::readCompactionPlanForActiveTimeline, 
activeTimeline),
 
 Review comment:
   @nbalajee printAllCompactions only calls compactionPlanReader for commits 
and compactions. As part of the refactor, timeline.getCommitsAndCompactionTimeline 
has been moved into printAllCompactions (to allow reuse between active/archive 
timelines). I just verified this works as expected even in the presence of cleans. 
   Let me know if you think there is a better way to organize this.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to