yihua commented on a change in pull request #4531:
URL: https://github.com/apache/hudi/pull/4531#discussion_r785677265
##########
File path: hudi-common/src/main/scala/org/apache/hudi/AbstractHoodieTableFileIndex.scala
##########
@@ -0,0 +1,300 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi
+
+import org.apache.hadoop.fs.{FileStatus, Path}
+import org.apache.hudi.common.config.{HoodieMetadataConfig, TypedProperties}
+import org.apache.hudi.common.engine.HoodieEngineContext
+import org.apache.hudi.common.fs.FSUtils
+import org.apache.hudi.common.model.HoodieTableType.MERGE_ON_READ
+import org.apache.hudi.common.model.{FileSlice, HoodieTableQueryType}
+import org.apache.hudi.common.table.HoodieTableMetaClient
+import org.apache.hudi.common.table.view.{FileSystemViewStorageConfig, HoodieTableFileSystemView}
+
+import scala.collection.JavaConverters._
+import scala.collection.JavaConversions._
+import scala.collection.mutable
+
+/**
+ * Common (engine-agnostic) File Index implementation enabling individual query engines to
+ * list Hudi Table contents based on the
+ *
+ * <ul>
+ *   <li>Table type (MOR, COW)</li>
+ *   <li>Query type (snapshot, read_optimized, incremental)</li>
+ *   <li>Query instant/range</li>
+ * </ul>
+ *
+ * @param engineContext Hudi engine-specific context
+ * @param metaClient Hudi table's meta-client
+ * @param configProperties unifying configuration (in the form of generic properties)
+ * @param queryType target query type
+ * @param queryPaths target DFS paths being queried
+ * @param specifiedQueryInstant instant as of which the table is being queried
+ * @param shouldIncludePendingCommits flags whether the file-index should include pending operations
+ * @param fileStatusCache transient cache of fetched [[FileStatus]]es
+ */
+abstract class AbstractHoodieTableFileIndex(engineContext: HoodieEngineContext,

Review comment:
   Let's rename the class with a `Base` prefix here.
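
For context, a minimal usage sketch of how an engine-specific integration could drive this file index. It is not part of the diff: it assumes the `HiveHoodieTableFileIndex` subclass shown in the next hunk, types as imported there, and a made-up query instant value.

    // Minimal usage sketch, not part of this PR's diff: construct the file index
    // and list file slices per partition. The instant value is an example only.
    static Map<String, Seq<FileSlice>> listSnapshotFileSlices(JobConf job,
                                                              HoodieTableMetaClient metaClient,
                                                              List<Path> partitionPaths) {
      HoodieLocalEngineContext engineContext = new HoodieLocalEngineContext(job);
      HiveHoodieTableFileIndex fileIndex = new HiveHoodieTableFileIndex(
          engineContext,                              // Hudi engine-specific context
          metaClient,                                 // Hudi table's meta-client
          new TypedProperties(new Properties()),      // unifying configuration
          HoodieTableQueryType.QUERY_TYPE_SNAPSHOT,   // target query type
          partitionPaths,                             // target DFS paths being queried
          Option.of("20220114153025"),                // query instant (example value)
          false);                                     // shouldIncludePendingCommits
      // listFileSlices() returns a Scala Map keyed by partition path,
      // so Java callers bridge it via JavaConverters, as the next hunk does
      return JavaConverters.mapAsJavaMapConverter(fileIndex.listFileSlices()).asJava();
    }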
##########
File path: hudi-hadoop-mr/src/main/java/org/apache/hudi/hadoop/HoodieFileInputFormatBase.java
##########
@@ -102,18 +138,98 @@ public final void setConf(Configuration conf) {
     // process snapshot queries next.
     List<Path> snapshotPaths = inputPathHandler.getSnapshotPaths();
     if (snapshotPaths.size() > 0) {
-      returns.addAll(HoodieInputFormatUtils.filterFileStatusForSnapshotMode(job, tableMetaClientMap, snapshotPaths, includeLogFilesForSnapShotView()));
+      returns.addAll(listStatusForSnapshotMode(job, tableMetaClientMap, snapshotPaths));
     }
     return returns.toArray(new FileStatus[0]);
   }
 
+  @Nonnull
+  private List<FileStatus> listStatusForSnapshotMode(JobConf job,
+                                                     Map<String, HoodieTableMetaClient> tableMetaClientMap,
+                                                     List<Path> snapshotPaths) throws IOException {
+    HoodieLocalEngineContext engineContext = new HoodieLocalEngineContext(job);
+    List<FileStatus> targetFiles = new ArrayList<>();
+
+    TypedProperties props = new TypedProperties(new Properties());
+
+    Map<HoodieTableMetaClient, List<Path>> groupedPaths =
+        HoodieInputFormatUtils.groupSnapshotPathsByMetaClient(tableMetaClientMap.values(), snapshotPaths);
+
+    for (Map.Entry<HoodieTableMetaClient, List<Path>> entry : groupedPaths.entrySet()) {
+      HoodieTableMetaClient tableMetaClient = entry.getKey();
+      List<Path> partitionPaths = entry.getValue();
+
+      // Hive job might specify a max commit instant up to which table's state
+      // should be examined. We simply pass it as query's instant to the file-index
+      Option<String> queryCommitInstant =
+          HoodieHiveUtils.getMaxCommit(job, tableMetaClient.getTableConfig().getTableName());
+
+      boolean shouldIncludePendingCommits =
+          HoodieHiveUtils.shouldIncludePendingCommits(job, tableMetaClient.getTableConfig().getTableName());
+
+      HiveHoodieTableFileIndex fileIndex =
+          new HiveHoodieTableFileIndex(
+              engineContext,
+              tableMetaClient,
+              props,
+              HoodieTableQueryType.QUERY_TYPE_SNAPSHOT,
+              partitionPaths,
+              queryCommitInstant,
+              shouldIncludePendingCommits);
+
+      Map<String, Seq<FileSlice>> partitionedFileSlices =
+          JavaConverters.mapAsJavaMapConverter(fileIndex.listFileSlices()).asJava();
+
+      targetFiles.addAll(
+          partitionedFileSlices.values()
+              .stream()
+              .flatMap(seq -> JavaConverters.seqAsJavaListConverter(seq).asJava().stream())
+              .map(fileSlice -> {
+                Option<HoodieBaseFile> baseFileOpt = fileSlice.getBaseFile();
+                Option<HoodieLogFile> latestLogFileOpt = fileSlice.getLatestLogFile();
+                if (baseFileOpt.isPresent()) {
+                  return getFileStatusUnchecked(baseFileOpt);
+                } else if (includeLogFilesForSnapShotView() && latestLogFileOpt.isPresent()) {
+                  return createRealtimeFileStatusUnchecked(latestLogFileOpt.get(), fileSlice.getLogFiles());
+                } else {
+                  throw new IllegalStateException("Invalid state: either base-file or log-file should be present");
+                }
+              })
+              .collect(Collectors.toList())
+      );
+    }
+
+    // TODO cleanup
+    validate(targetFiles, listStatusForSnapshotModeLegacy(job, tableMetaClientMap, snapshotPaths));

Review comment:
   Got it. I'll take another look once done.
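
The `validate` call above cross-checks the new file-index-based listing against the legacy listing while the migration is in flight; its body is not shown in this hunk. A hypothetical sketch of what such a cross-check could look like (the `difference` helper is made up for illustration):

    // Hypothetical sketch only; the actual validate() body is not shown in this hunk.
    // Idea: the file-index-based listing must return exactly the paths the legacy
    // listing returned, and any divergence should fail loudly.
    private static void validate(List<FileStatus> targetFiles, List<FileStatus> legacyFiles) {
      Set<Path> targetPaths = targetFiles.stream().map(FileStatus::getPath).collect(Collectors.toSet());
      Set<Path> legacyPaths = legacyFiles.stream().map(FileStatus::getPath).collect(Collectors.toSet());
      if (!targetPaths.equals(legacyPaths)) {
        throw new IllegalStateException("File-index listing diverged from legacy listing: "
            + "only-in-new=" + difference(targetPaths, legacyPaths)
            + ", only-in-legacy=" + difference(legacyPaths, targetPaths));
      }
    }

    // Made-up helper: elements of `a` that are absent from `b`
    private static Set<Path> difference(Set<Path> a, Set<Path> b) {
      Set<Path> result = new HashSet<>(a);
      result.removeAll(b);
      return result;
    }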
##########
File path: hudi-hadoop-mr/src/test/java/org/apache/hudi/hadoop/testutils/InputFormatTestUtil.java
##########
@@ -175,8 +182,12 @@ public static File prepareParquetTable(java.nio.file.Path basePath, Schema schem
   public static File prepareParquetTable(java.nio.file.Path basePath, Schema schema, int numberOfFiles,
                                          int numberOfRecords, String commitNumber, HoodieTableType tableType) throws IOException {
     HoodieTestUtils.init(HoodieTestUtils.getDefaultHadoopConf(), basePath.toString(), tableType, HoodieFileFormat.PARQUET);
+

Review comment:
   Got it, sounds good. I'll let you make the judgement.
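
For readers unfamiliar with this test utility, a minimal, hypothetical usage sketch of the `prepareParquetTable` overload shown above. The JUnit 5 `@TempDir` wiring and the trivial Avro schema are assumptions for illustration, not code from this PR:

    // Hypothetical usage sketch of the helper above; schema and temp-dir are assumed.
    class InputFormatTestUtilUsageExample {
      @org.junit.jupiter.api.io.TempDir
      java.nio.file.Path basePath;

      @org.junit.jupiter.api.Test
      void prepareCopyOnWriteParquetTable() throws IOException {
        // Any Avro schema the suite writes with; this trivial one is a stand-in
        Schema schema = new Schema.Parser().parse(
            "{\"type\":\"record\",\"name\":\"Rec\",\"fields\":[{\"name\":\"field1\",\"type\":\"string\"}]}");
        // Lays out a COW table with 3 parquet files of 100 records under commit "100"
        File partitionDir = InputFormatTestUtil.prepareParquetTable(
            basePath, schema, 3, 100, "100", HoodieTableType.COPY_ON_WRITE);
        // ... point the input format at partitionDir and assert on the listed files
      }
    }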
