danny0405 commented on code in PR #17490:
URL: https://github.com/apache/hudi/pull/17490#discussion_r2605565541


##########
hudi-flink-datasource/hudi-flink/src/main/java/org/apache/hudi/source/stats/RecordLevelIndex.java:
##########
@@ -0,0 +1,211 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.source.stats;
+
+import org.apache.hudi.client.common.HoodieFlinkEngineContext;
+import org.apache.hudi.common.data.HoodieListData;
+import org.apache.hudi.common.data.HoodiePairData;
+import org.apache.hudi.common.model.FileSlice;
+import org.apache.hudi.common.model.HoodieRecordGlobalLocation;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.common.util.HoodieDataUtils;
+import org.apache.hudi.common.util.Option;
+import org.apache.hudi.common.util.VisibleForTesting;
+import org.apache.hudi.common.util.collection.Pair;
+import org.apache.hudi.configuration.FlinkOptions;
+import org.apache.hudi.configuration.OptionsResolver;
+import org.apache.hudi.keygen.KeyGenUtils;
+import org.apache.hudi.keygen.KeyGenerator;
+import org.apache.hudi.metadata.HoodieTableMetadata;
+import org.apache.hudi.metadata.HoodieTableMetadataUtil;
+import org.apache.hudi.source.ExpressionEvaluators;
+import org.apache.hudi.util.StreamerUtil;
+
+import org.apache.flink.configuration.Configuration;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+import java.util.Objects;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+/**
+ * An index support implementation that leverages Record Level Index to prune file slices.
+ */
+public class RecordLevelIndex implements FlinkBaseIndex {
+  private static final long serialVersionUID = 1L;
+  private static final Logger LOG = LoggerFactory.getLogger(RecordLevelIndex.class);
+
+  private final String basePath;
+  private final Configuration conf;
+  private final List<String> hoodieKeysFromFilter;
+  private final HoodieTableMetaClient metaClient;
+  private HoodieTableMetadata metadataTable;
+
+  private RecordLevelIndex(
+      String basePath,
+      Configuration conf,
+      HoodieTableMetaClient metaClient,
+      List<String> hoodieKeysFromFilter) {
+    this.basePath = basePath;
+    this.conf = conf;
+    this.metaClient = metaClient;
+    this.hoodieKeysFromFilter = hoodieKeysFromFilter;
+  }
+
+  @Override
+  public String getIndexPartitionName() {
+    return HoodieTableMetadataUtil.PARTITION_NAME_RECORD_INDEX;
+  }
+
+  @Override
+  public boolean isIndexAvailable() {
+    return metaClient.getTableConfig().isMetadataTableAvailable()
+        && metaClient.getTableConfig().getMetadataPartitions().contains(HoodieTableMetadataUtil.PARTITION_NAME_RECORD_INDEX);
+  }
+
+  public HoodieTableMetadata getMetadataTable() {
+    // initialize the metadata table lazily
+    if (this.metadataTable == null) {
+      this.metadataTable = metaClient.getTableFormat().getMetadataFactory().create(
+          HoodieFlinkEngineContext.DEFAULT,
+          metaClient.getStorage(),
+          StreamerUtil.metadataConfig(conf),
+          basePath);
+    }
+    return this.metadataTable;
+  }
+
+  public List<FileSlice> computeCandidateFileSlices(List<FileSlice> fileSlices) {
+    if (!isIndexAvailable()) {
+      return fileSlices;
+    }
+    HoodiePairData<String, HoodieRecordGlobalLocation> recordIndexData =
+        getMetadataTable().readRecordIndexLocationsWithKeys(HoodieListData.eager(hoodieKeysFromFilter));
+    try {
+      List<Pair<String, HoodieRecordGlobalLocation>> recordIndexLocations = HoodieDataUtils.dedupeAndCollectAsList(recordIndexData);
+      Set<String> fileIds = recordIndexLocations.stream()
+          .map(pair -> pair.getValue().getFileId()).collect(Collectors.toSet());
+      return fileSlices.stream().filter(fileSlice -> fileIds.contains(fileSlice.getFileId())).collect(Collectors.toList());
+    } finally {
+      // Clean up the RDD to avoid memory leaks
+      recordIndexData.unpersistWithDependencies();
+    }
+  }
+
+  public static Option<RecordLevelIndex> create(String basePath, Configuration conf, HoodieTableMetaClient metaClient, List<ExpressionEvaluators.Evaluator> evaluators) {
+    if (evaluators.isEmpty() || !FlinkOptions.QUERY_TYPE_SNAPSHOT.equalsIgnoreCase(conf.get(FlinkOptions.QUERY_TYPE))) {
+      return Option.empty();
+    }
+    if (metaClient == null) {
+      metaClient = StreamerUtil.createMetaClient(conf);
+    }
+    if (KeyGenUtils.mayUseNewEncodingForComplexKeyGen(metaClient.getTableConfig())) {
+      return Option.empty();
+    }
+
+    String[] recordKeyFields = metaClient.getTableConfig().getRecordKeyFields().orElse(new String[0]);
+    if (recordKeyFields.length == 0) {
+      return Option.empty();
+    }
+    List<String> hoodieKeysFromFilter = computeHoodieKeyFromFilters(conf, metaClient, evaluators, recordKeyFields);
+    if (hoodieKeysFromFilter.isEmpty() || hoodieKeysFromFilter.size() > conf.get(FlinkOptions.RECORD_INDEX_KEYS_MAX_COUNT)) {
+      LOG.info("Hoodie keys from query predicate: {}, key number: {}, maximum value: {}.",
+          hoodieKeysFromFilter, hoodieKeysFromFilter.size(), conf.get(FlinkOptions.RECORD_INDEX_KEYS_MAX_COUNT));
+      return Option.empty();
+    }
+    return Option.of(new RecordLevelIndex(basePath, conf, metaClient, hoodieKeysFromFilter));
+  }
+
+  /**
+   * Given query filters, it extracts the EqualTo and IN predicates on record key columns and
+   * returns the list of record key literals present in the query, for example:
+   * <p>
+   * filter1: `key1` = 'val1', returns {"val1"}
+   * filter2: `key1` in ('val1', 'val2', 'val3'), returns {"val1", "val2", "val3"}
+   * filter3: `key1` = 'val1' AND `key2` in ('val2', 'val3'), returns {"key1:val1,key2:val2", "key1:val1,key2:val3"}
+   */
+  @VisibleForTesting
+  public static List<String> computeHoodieKeyFromFilters(
+      Configuration conf, HoodieTableMetaClient metaClient, List<ExpressionEvaluators.Evaluator> evaluators, String[] keyFields) {
+    String[] partitionFields = metaClient.getTableConfig().getPartitionFields().orElse(new String[0]);
+    // align with the check logic in RowDataKeyGen
+    boolean isComplexRecordKey = keyFields.length > 1 || partitionFields.length > 1 && !OptionsResolver.useComplexKeygenNewEncoding(conf);
+    List<String> hoodieKeys = new ArrayList<>();
+    for (String keyField: keyFields) {
+      List<String> recordKeys = new ArrayList<>();
+      for (ExpressionEvaluators.Evaluator evaluator: evaluators) {
+        // if there exist multiple ref fields in an evaluator, ignore this evaluator, e.g., key = 'key1' or age = 20
+        List<Object> literals = collectLiterals(evaluator, keyField);
+        literals.forEach(val -> recordKeys.add(isComplexRecordKey ? keyField + KeyGenerator.DEFAULT_COLUMN_VALUE_SEPARATOR + val : val.toString()));

Review Comment:
   Not sure whether `#toString` works for all data types. In `ExpressionUtils.getValueFromLiteral`, for example, we handle the value decoding for different data types, especially for time, timestamp and date.
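
   As a self-contained illustration of that concern (plain JDK only; nothing below is Hudi code, and the class name is made up): temporal literals stringify to ISO-8601 text via `toString()`, while the record key may have been written from an epoch-based encoding, in which case the lookup key would never match the indexed key.

   ```java
   import java.time.LocalDate;
   import java.time.LocalDateTime;
   import java.time.ZoneOffset;

   // Minimal sketch of how toString() on temporal values can diverge from an
   // epoch-based key encoding used at write time.
   public class ToStringMismatchSketch {
     public static void main(String[] args) {
       LocalDate date = LocalDate.of(2024, 1, 15);
       System.out.println(date.toString());   // "2024-01-15"
       System.out.println(date.toEpochDay()); // 19737 (epoch days)

       LocalDateTime ts = LocalDateTime.of(2024, 1, 15, 12, 30, 0);
       System.out.println(ts.toString());     // "2024-01-15T12:30"
       // Epoch millis of the same instant (UTC assumed for this sketch):
       System.out.println(ts.toInstant(ZoneOffset.UTC).toEpochMilli()); // 1705321800000
     }
   }
   ```

   If that mismatch applies here, routing each literal through the same per-type decoding used at write time (as the comment notes `ExpressionUtils.getValueFromLiteral` does) would keep the lookup keys consistent.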


