vinothchandar commented on code in PR #10255:
URL: https://github.com/apache/hudi/pull/10255#discussion_r1486534637


##########
hudi-common/src/main/java/org/apache/hudi/common/table/read/IncrementalQueryAnalyzer.java:
##########
@@ -0,0 +1,428 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.common.table.read;
+
+import org.apache.hudi.common.model.HoodieTableType;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.table.log.InstantRange;
+import org.apache.hudi.common.table.timeline.CompletionTimeQueryView;
+import org.apache.hudi.common.table.timeline.HoodieArchivedTimeline;
+import org.apache.hudi.common.table.timeline.HoodieInstant;
+import org.apache.hudi.common.table.timeline.HoodieTimeline;
+import org.apache.hudi.common.util.ClusteringUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.VisibleForTesting;
+import org.apache.hudi.common.util.collection.Pair;
+
+import javax.annotation.Nullable;
+
+import java.util.ArrayList;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+import java.util.stream.IntStream;
+import java.util.stream.Stream;
+
+/**
+ * Analyzer for incremental queries.
+ *
+ * <p>The analyzer can supply info about the incremental queries including:
+ * <ul>
+ *   <li>The archived instant candidates;</li>
+ *   <li>The active instant candidates;</li>
+ *   <li>The instant filtering predicate, e.g. the instant range;</li>
+ *   <li>Whether the query starts from the earliest;</li>
+ *   <li>Whether the query ends to the latest;</li>
+ *   <li>The max completion time used for fs view file slice version 
filtering.</li>
+ * </ul>
+ *
+ * <p><h2>Criteria for different query ranges:</h2>
+ *
+ * <table>
+ *   <tr>
+ *     <th>Query Range</th>
+ *     <th>File Handles Decoding</th>
+ *     <th>Instant Filtering Predicate</th>
+ *   </tr>
+ *   <tr>
+ *     <td>[earliest, _]</td>
+ *     <td>The latest snapshot files from table metadata</td>
+ *     <td>_</td>
+ *   </tr>
+ *   <tr>
+ *     <td>[earliest, endTime]</td>
+ *     <td>The latest snapshot files from table metadata</td>
+ *     <td>'_hoodie_commit_time' in setA, setA is a collection of all the 
instants completed before or on 'endTime'</td>
+ *   </tr>
+ *   <tr>
+ *     <td>[_, _]</td>
+ *     <td>The latest completed instant metadata</td>
+ *     <td>'_hoodie_commit_time' = i_n, i_n is the latest completed 
instant</td>
+ *   </tr>
+ *   <tr>
+ *     <td>[_, endTime]</td>
+ *     <td>i).find the last completed instant i_n before or on 'endTime';
+ *     ii). read the latest snapshot from table metadata if i_n is archived or 
the commit metadata if it is still active</td>
+ *     <td>'_hoodie_commit_time' = i_n</td>
+ *   </tr>
+ *   <tr>
+ *     <td>[startTime, _]</td>
+ *     <td>i).find the instant set setA, setA is a collection of all the 
instants completed after or on 'startTime';
+ *     ii). read the latest snapshot from table metadata if setA has archived 
instants or the commit metadata if all the instants are still active</td>
+ *     <td>'_hoodie_commit_time' in setA</td>
+ *   </tr>
+ *   <tr>
+ *     <td>[startTime, endTime]</td>
+ *     <td>i).find the instant set setA, setA is a collection of all the 
instants completed in the given time range;
+ *     ii). read the latest snapshot from table metadata if setA has archived 
instants or the commit metadata if all the instants are still active</td>
+ *     <td>'_hoodie_commit_time' in setA</td>
+ *   </tr>
+ * </table>
+ *
+ * <p> A range type is required for analyzing the query so that the query 
range boundary inclusiveness has clear semantics.
+ *
+ * <p>IMPORTANT: the reader may optionally choose to fall back to reading the 
latest snapshot if there are files missing from decoding the commit metadata.
+ */
+public class IncrementalQueryAnalyzer {
+  public static final String START_COMMIT_EARLIEST = "earliest";
+
+  private final HoodieTableMetaClient metaClient;
+  private final Option<String> startTime;
+  private final Option<String> endTime;
+  private final InstantRange.RangeType rangeType;
+  private final boolean skipCompaction;
+  private final boolean skipClustering;
+  private final int limit;
+
+  private IncrementalQueryAnalyzer(
+      HoodieTableMetaClient metaClient,
+      String startTime,
+      String endTime,
+      InstantRange.RangeType rangeType,
+      boolean skipCompaction,
+      boolean skipClustering,
+      int limit) {
+    this.metaClient = metaClient;
+    this.startTime = Option.ofNullable(startTime);
+    this.endTime = Option.ofNullable(endTime);
+    this.rangeType = rangeType;
+    this.skipCompaction = skipCompaction;
+    this.skipClustering = skipClustering;
+    this.limit = limit;
+  }
+
+  /**
+   * Returns a builder.
+   */
+  public static Builder builder() {
+    return new Builder();
+  }
+
+  /**
+   * Analyzes the incremental query context with given completion time range.
+   *
+   * @return An incremental query context including the instant time range 
info.
+   */
+  public QueryContext analyze() {
+    try (CompletionTimeQueryView completionTimeQueryView = new 
CompletionTimeQueryView(this.metaClient)) {
+      if (completionTimeQueryView.isEmptyTable()) {
+        // no dataset committed in the table
+        return QueryContext.EMPTY;
+      }
+      HoodieTimeline readTimeline = getReadTimeline(this.metaClient);
+      List<String> instantTimeList = 
completionTimeQueryView.getStartTimes(readTimeline, startTime, endTime, 
rangeType);
+      if (instantTimeList.isEmpty()) {
+        // no instants completed within the given time range, returns early.
+        return QueryContext.EMPTY;
+      }
+      // get hoodie instants
+      Pair<List<String>, List<String>> splitInstantTime = 
splitInstantByActiveness(instantTimeList, completionTimeQueryView);
+      Set<String> instantTimeSet = new HashSet<>(instantTimeList);
+      List<String> archivedInstantTime = splitInstantTime.getKey();
+      List<String> activeInstantTime = splitInstantTime.getValue();
+      List<HoodieInstant> archivedInstants = new ArrayList<>();
+      List<HoodieInstant> activeInstants = new ArrayList<>();
+      HoodieTimeline archivedReadTimeline = null;
+      if (!activeInstantTime.isEmpty()) {
+        activeInstants = readTimeline.getInstantsAsStream().filter(instant -> 
instantTimeSet.contains(instant.getTimestamp())).collect(Collectors.toList());
+        if (limit > 0 && limit < activeInstants.size()) {
+          // streaming read speed limit, limits the maximum number of commits 
allowed to read for each run
+          activeInstants = activeInstants.subList(0, limit);
+        }
+      }
+      if (!archivedInstantTime.isEmpty()) {
+        archivedReadTimeline = getArchivedReadTimeline(metaClient, 
archivedInstantTime.get(0));
+        archivedInstants = 
archivedReadTimeline.getInstantsAsStream().filter(instant -> 
instantTimeSet.contains(instant.getTimestamp())).collect(Collectors.toList());
+      }
+      List<String> instants = Stream.concat(archivedInstants.stream(), 
activeInstants.stream()).map(HoodieInstant::getTimestamp).collect(Collectors.toList());
+      if (instants.isEmpty()) {
+        // no instants completed within the given time range, returns early.
+        return QueryContext.EMPTY;
+      }
+      if (startTime.isEmpty() && endTime.isPresent()) {
+        instants = Collections.singletonList(instants.get(instants.size() - 
1));
+      }
+      String lastInstant = instants.get(instants.size() - 1);
+      // keep the same semantics with streaming read, default start from the 
latest commit
+      String startInstant = 
START_COMMIT_EARLIEST.equalsIgnoreCase(startTime.orElse(null)) ? null : 
startTime.isEmpty() ? lastInstant : instants.get(0);

Review Comment:
   I think some of these semantics can be improved. Will take it up as follow 
on. 



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to