yuzhaojing commented on a change in pull request #3134:
URL: https://github.com/apache/hudi/pull/3134#discussion_r657638827



##########
File path: hudi-flink/src/main/java/org/apache/hudi/sink/bootstrap/BootstrapFunction.java
##########
@@ -168,33 +178,74 @@ private HoodieFlinkTable getTable() {
    * @param partitionPath The partition path
    */
   @SuppressWarnings("unchecked")
-  private void loadRecords(String partitionPath, Collector<O> out) {
+  private void loadRecords(String partitionPath, Collector<O> out) throws Exception {
     long start = System.currentTimeMillis();
+
     BaseFileUtils fileUtils = BaseFileUtils.getInstance(this.hoodieTable.getBaseFileFormat());
-    List<HoodieBaseFile> latestBaseFiles =
-        HoodieIndexUtils.getLatestBaseFilesForPartition(partitionPath, this.hoodieTable);
-    LOG.info("All baseFile in partition {} size = {}", partitionPath, latestBaseFiles.size());
+    Schema schema = new TableSchemaResolver(this.hoodieTable.getMetaClient()).getTableAvroSchema();
 
     final int parallelism = getRuntimeContext().getNumberOfParallelSubtasks();
     final int maxParallelism = getRuntimeContext().getMaxNumberOfParallelSubtasks();
     final int taskID = getRuntimeContext().getIndexOfThisSubtask();
-    for (HoodieBaseFile baseFile : latestBaseFiles) {
-      boolean shouldLoad = KeyGroupRangeAssignment.assignKeyToParallelOperator(
-          baseFile.getFileId(), maxParallelism, parallelism) == taskID;
-
-      if (shouldLoad) {
-        LOG.info("Load records from file {}.", baseFile);
-        final List<HoodieKey> hoodieKeys;
-        try {
-          hoodieKeys =
-              fileUtils.fetchRecordKeyPartitionPath(this.hadoopConf, new Path(baseFile.getPath()));
-        } catch (Exception e) {
-          throw new HoodieException(String.format("Error when loading record keys from file: %s", baseFile), e);
-        }
 
-        for (HoodieKey hoodieKey : hoodieKeys) {
-          out.collect((O) new IndexRecord(generateHoodieRecord(hoodieKey, baseFile)));
-        }
+    Option<HoodieInstant> latestCommitTime = this.hoodieTable.getMetaClient().getCommitsTimeline()
+        .filterCompletedInstants().lastInstant();
+
+    if (latestCommitTime.isPresent()) {
+      List<FileSlice> fileSlices = this.hoodieTable.getSliceView()
+          .getLatestFileSlicesBeforeOrOn(partitionPath, latestCommitTime.get().getTimestamp(), true)
+          .collect(toList());
+
+      for (FileSlice fileSlice : fileSlices) {
+        // load parquet records
+        fileSlice.getBaseFile().ifPresent(baseFile -> {
+          // filter out crushed files
+          if (baseFile.getFileSize() <= 0) {
+            return;
+          }
+
+          if (shouldLoadFileId(baseFile.getFileId(), maxParallelism, parallelism, taskID)) {
+            LOG.info("Load records from file {}.", baseFile);
+            final List<HoodieKey> hoodieKeys;
+            try {
+              hoodieKeys =
+                  fileUtils.fetchRecordKeyPartitionPath(this.hadoopConf, new Path(baseFile.getPath()));
+            } catch (Exception e) {
+              throw new HoodieException(String.format("Error when loading record keys from file: %s", baseFile), e);
+            }
+
+            for (HoodieKey hoodieKey : hoodieKeys) {
+              out.collect((O) new IndexRecord(generateHoodieRecord(hoodieKey, baseFile)));
+            }
+          }
+        });
+
+        // load avro log records
+        fileSlice.getLogFiles().forEach(logFile -> {
+          // filter out crushed files

Review comment:
       OK, I have already modified this.
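
       For readers following this diff: below is a minimal sketch of what the `shouldLoadFileId` helper called above could look like, assuming it simply wraps the Flink `KeyGroupRangeAssignment` check that the removed inline code performed. The method name and parameters are taken from the call site in this hunk; the PR's actual implementation may differ.

```java
import org.apache.flink.runtime.state.KeyGroupRangeAssignment;

// Sketch only: assumes the helper routes file groups to subtasks the same way the
// old inline check did, by hashing the fileId into the key-group space and comparing
// the resulting operator index with this subtask's index.
private static boolean shouldLoadFileId(String fileId, int maxParallelism, int parallelism, int taskID) {
  return KeyGroupRangeAssignment.assignKeyToParallelOperator(fileId, maxParallelism, parallelism) == taskID;
}
```

       With a helper like this, the bootstrap work stays partitioned as before: each parallel subtask only loads the base and log files of the file groups that hash to its own operator index.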




-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org

