linliu-code commented on code in PR #13580:
URL: https://github.com/apache/hudi/pull/13580#discussion_r2222778080


##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/io/FileGroupReaderBasedMergeHandle.java:
##########
@@ -108,6 +118,42 @@ public FileGroupReaderBasedMergeHandle(HoodieWriteConfig config, String instantT
     } else {
       this.cdcLogger = Option.empty();
     }
+    this.recordIterator = null;
+    init(operation, this.partitionPath, fileSlice.getBaseFile());
+  }
+
+  /**
+   * FG reader based generic merge handle, which is not just for compaction.
+   */
+  public FileGroupReaderBasedMergeHandle(HoodieWriteConfig config, String instantTime, HoodieTable<T, I, K, O> hoodieTable,
+                                         Iterator<HoodieRecord<T>> recordItr, String partitionPath, String fileId,
+                                         TaskContextSupplier taskContextSupplier, Option<BaseKeyGenerator> keyGeneratorOpt,
+                                         HoodieReaderContext<T> readerContext, String maxInstantTime,
+                                         HoodieRecord.HoodieRecordType enginRecordType) {
+    super(config, instantTime, hoodieTable, recordItr, partitionPath, fileId, taskContextSupplier, keyGeneratorOpt);
+    this.maxInstantTime = maxInstantTime;
+    this.keyToNewRecords = Collections.emptyMap();
+    this.readerContext = readerContext;
+    this.fileSlice = null;
+    this.recordIterator = recordItr;
+    this.operation = null;
+    if (hoodieTable.getMetaClient().getTableConfig().isCDCEnabled()) {
+      this.cdcLogger = Option.of(new HoodieCDCLogger(
+          instantTime,
+          config,
+          hoodieTable.getMetaClient().getTableConfig(),
+          partitionPath,
+          storage,
+          getWriterSchema(),
+          createLogWriter(instantTime, HoodieCDCUtils.CDC_LOGFILE_SUFFIX, Option.empty()),
+          IOUtils.getMaxMemoryPerPartitionMerge(taskContextSupplier, config)));
+    } else {
+      this.cdcLogger = Option.empty();
+    }
+    // If the table is a metadata table or the base file is an HFile, we use AVRO record type, otherwise we use the engine record type.
+    this.recordType = (hoodieTable.isMetadataTable()
+        || HFILE.getFileExtension().equals(hoodieTable.getBaseFileExtension()))
+        ? HoodieRecord.HoodieRecordType.AVRO : enginRecordType;

Review Comment:
   I have refactored the code, so these comments are now outdated and have been removed.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to