cshuo commented on code in PR #18348:
URL: https://github.com/apache/hudi/pull/18348#discussion_r3050368530


##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/HoodieBackedTableMetadataWriter.java:
##########
@@ -432,107 +412,78 @@ private boolean initializeFromFilesystem(String 
dataTableInstantTime, List<Metad
         partitionInfoList = Collections.emptyList();
       }
     }
-    Map<String, Map<String, Long>> partitionIdToAllFilesMap = 
partitionInfoList.stream()
-        .map(p -> {
-          String partitionName = 
HoodieTableMetadataUtil.getPartitionIdentifierForFilesPartition(p.getRelativePath());
-          return Pair.of(partitionName, p.getFilenameToSizeMap());
-        })
-        .collect(Collectors.toMap(Pair::getKey, Pair::getValue));
-
-    // validate that each index is eligible to be initialized
-    Iterator<MetadataPartitionType> iterator = partitionsToInit.iterator();
-    while (iterator.hasNext()) {
-      MetadataPartitionType partitionType = iterator.next();
-      if (partitionType == PARTITION_STATS && 
!dataMetaClient.getTableConfig().isTablePartitioned()) {
-        // Partition stats index cannot be enabled for a non-partitioned table
-        iterator.remove();
-        this.enabledPartitionTypes.remove(partitionType);
-      }
-    }
+    Map<String, List<FileInfo>> partitionIdToAllFilesMap = 
DirectoryInfo.getPartitionToFileInfo(partitionInfoList);
+    Lazy<List<FileSliceAndPartition>> lazyLatestMergedPartitionFileSliceList = 
getLazyLatestMergedPartitionFileSliceList();
 
-    // For a fresh table, defer RLI initialization
-    if (dataWriteConfig.getMetadataConfig().shouldDeferRliInitForFreshTable() 
&& this.enabledPartitionTypes.contains(RECORD_INDEX)
-        && 
dataMetaClient.getActiveTimeline().filterCompletedInstants().countInstants() == 
0) {
-      this.enabledPartitionTypes.remove(RECORD_INDEX);
-      partitionsToInit.remove(RECORD_INDEX);
+    // FILES partition should always be initialized first if enabled
+    if (!filesPartitionAvailable) {
+      initializeMetadataPartition(FILES, 
indexerMapForPartitionsToInit.get(FILES),
+          dataTableInstantTime, partitionIdToAllFilesMap, 
lazyLatestMergedPartitionFileSliceList);
+      hasPartitionsStateChanged = true;
     }
 
-    Lazy<List<Pair<String, FileSlice>>> lazyLatestMergedPartitionFileSliceList 
= getLazyLatestMergedPartitionFileSliceList();
-    for (MetadataPartitionType partitionType : partitionsToInit) {
-      // Find the commit timestamp to use for this partition. Each 
initialization should use its own unique commit time.
-      String instantTimeForPartition = 
generateUniqueInstantTime(dataTableInstantTime);
-      String partitionTypeName = partitionType.name();
-      LOG.info("Initializing MDT partition {} at instant {}", 
partitionTypeName, instantTimeForPartition);
-      String relativePartitionPath;
-      Pair<Integer, HoodieData<HoodieRecord>> fileGroupCountAndRecordsPair;
-      Lazy<Option<HoodieSchema>> tableSchema = Lazy.lazily(() -> 
HoodieTableMetadataUtil.tryResolveSchemaForTable(dataMetaClient));
-      try {
-        switch (partitionType) {
-          case FILES:
-            fileGroupCountAndRecordsPair = 
initializeFilesPartition(partitionIdToAllFilesMap);
-            initializeFilegroupsAndCommit(partitionType, 
FILES.getPartitionPath(), fileGroupCountAndRecordsPair, 
instantTimeForPartition);
-            break;
-          case BLOOM_FILTERS:
-            fileGroupCountAndRecordsPair = 
initializeBloomFiltersPartition(dataTableInstantTime, partitionIdToAllFilesMap);
-            initializeFilegroupsAndCommit(partitionType, 
BLOOM_FILTERS.getPartitionPath(), fileGroupCountAndRecordsPair, 
instantTimeForPartition);
-            break;
-          case COLUMN_STATS:
-            Pair<List<String>, Pair<Integer, HoodieData<HoodieRecord>>> 
colStatsColumnsAndRecord = 
initializeColumnStatsPartition(partitionIdToAllFilesMap, tableSchema);
-            fileGroupCountAndRecordsPair = colStatsColumnsAndRecord.getValue();
-            initializeFilegroupsAndCommit(partitionType, 
COLUMN_STATS.getPartitionPath(), fileGroupCountAndRecordsPair, 
instantTimeForPartition, colStatsColumnsAndRecord.getKey());
-            break;
-          case RECORD_INDEX:
-            boolean isPartitionedRLI = 
dataWriteConfig.isRecordLevelIndexEnabled();
-            
initializeFilegroupsAndCommitToRecordIndexPartition(instantTimeForPartition, 
lazyLatestMergedPartitionFileSliceList, isPartitionedRLI);
-            break;
-          case EXPRESSION_INDEX:
-            Set<String> expressionIndexPartitionsToInit = 
getExpressionIndexPartitionsToInit(partitionType, 
dataWriteConfig.getMetadataConfig(), dataMetaClient);
-            if (expressionIndexPartitionsToInit.size() != 1) {
-              if (expressionIndexPartitionsToInit.size() > 1) {
-                LOG.warn("Skipping expression index initialization as only one 
expression index bootstrap at a time is supported for now. Provided: {}", 
expressionIndexPartitionsToInit);
-              }
-              continue;
-            }
-            relativePartitionPath = 
expressionIndexPartitionsToInit.iterator().next();
-            fileGroupCountAndRecordsPair = 
initializeExpressionIndexPartition(relativePartitionPath, dataTableInstantTime, 
lazyLatestMergedPartitionFileSliceList, tableSchema);
-            initializeFilegroupsAndCommit(partitionType, 
relativePartitionPath, fileGroupCountAndRecordsPair, instantTimeForPartition);
-            break;
-          case PARTITION_STATS:
-            // For PARTITION_STATS, COLUMN_STATS should also be enabled
-            if (!dataWriteConfig.isMetadataColumnStatsIndexEnabled()) {
-              LOG.debug("Skipping partition stats initialization as column 
stats index is not enabled. Please enable {}",
-                  
HoodieMetadataConfig.ENABLE_METADATA_INDEX_COLUMN_STATS.key());
-              continue;
-            }
-            fileGroupCountAndRecordsPair = 
initializePartitionStatsIndex(lazyLatestMergedPartitionFileSliceList, 
tableSchema);
-            initializeFilegroupsAndCommit(partitionType, 
PARTITION_STATS.getPartitionPath(), fileGroupCountAndRecordsPair, 
instantTimeForPartition);
-            break;
-          case SECONDARY_INDEX:
-            Set<String> secondaryIndexPartitionsToInit = 
getSecondaryIndexPartitionsToInit(partitionType, 
dataWriteConfig.getMetadataConfig(), dataMetaClient);
-            if (secondaryIndexPartitionsToInit.size() != 1) {
-              if (secondaryIndexPartitionsToInit.size() > 1) {
-                LOG.warn("Skipping secondary index initialization as only one 
secondary index bootstrap at a time is supported for now. Provided: {}", 
secondaryIndexPartitionsToInit);
-              }
-              continue;
-            }
-            relativePartitionPath = 
secondaryIndexPartitionsToInit.iterator().next();
-            fileGroupCountAndRecordsPair = 
initializeSecondaryIndexPartition(relativePartitionPath, 
lazyLatestMergedPartitionFileSliceList);
-            initializeFilegroupsAndCommit(partitionType, 
relativePartitionPath, fileGroupCountAndRecordsPair, instantTimeForPartition);
-            break;
-          default:
-            throw new HoodieMetadataException(String.format("Unsupported MDT 
partition type: %s", partitionType));
-        }
-      } catch (Exception e) {
-        String metricKey = partitionType.getPartitionPath() + "_" + 
HoodieMetadataMetrics.BOOTSTRAP_ERR_STR;
-        metrics.ifPresent(m -> m.setMetric(metricKey, 1));
-        String errMsg = String.format("Bootstrap on %s partition failed for 
%s",
-            partitionType.getPartitionPath(), 
metadataMetaClient.getBasePath());
-        LOG.error(errMsg, e);
-        throw new HoodieMetadataException(errMsg, e);
+    indexerMapForPartitionsToInit.entrySet().stream().filter(e -> e.getKey() 
!= FILES).forEach(
+        e -> {
+          try {
+            initializeMetadataPartition(e.getKey(), e.getValue(), 
dataTableInstantTime, partitionIdToAllFilesMap, 
lazyLatestMergedPartitionFileSliceList);
+          } catch (IOException ex) {
+            throw new HoodieMetadataException("Failed to initialize metadata 
partition: " + e.getKey(), ex);
+          }
+          hasPartitionsStateChanged = true;

Review Comment:
   PARTITION_STATS initialization does not depend on COLUMN_STATS having already been initialized: during bootstrap, `PartitionStatsIndexer.buildInitialization()` generates its records directly from the data table file slices.



##########
hudi-client/hudi-client-common/src/main/java/org/apache/hudi/metadata/index/IndexerFactory.java:
##########
@@ -0,0 +1,105 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hudi.metadata.index;
+
+import org.apache.hudi.common.engine.HoodieEngineContext;
+import org.apache.hudi.common.table.HoodieTableMetaClient;
+import org.apache.hudi.config.HoodieWriteConfig;
+import org.apache.hudi.exception.HoodieNotSupportedException;
+import org.apache.hudi.metadata.MetadataPartitionType;
+import org.apache.hudi.metadata.index.bloomfilters.BloomFiltersIndexer;
+import org.apache.hudi.metadata.index.columnstats.ColumnStatsIndexer;
+import org.apache.hudi.metadata.index.expression.ExpressionIndexer;
+import org.apache.hudi.metadata.index.files.FilesIndexer;
+import org.apache.hudi.metadata.index.partitionstats.PartitionStatsIndexer;
+import org.apache.hudi.metadata.index.record.PartitionedRecordIndexer;
+import org.apache.hudi.metadata.index.record.RecordIndexer;
+import org.apache.hudi.metadata.index.secondary.SecondaryIndexer;
+
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.Map;
+import java.util.function.Function;
+import java.util.stream.Collectors;
+
+/**
+ * Factory for creating {@link Indexer} implementations and resolving enabled 
indexers
+ * based on table and metadata configuration.
+ */
+public class IndexerFactory {
+  private static Indexer createIndexer(MetadataPartitionType partitionType,
+                                       HoodieEngineContext engineContext,
+                                       HoodieWriteConfig dataTableWriteConfig,
+                                       HoodieTableMetaClient 
dataTableMetaClient,
+                                       ExpressionIndexRecordGenerator 
expressionIndexRecordGenerator) {
+    switch (partitionType) {
+      case FILES:
+        return new FilesIndexer(engineContext, dataTableWriteConfig, 
dataTableMetaClient);
+      case BLOOM_FILTERS:
+        return new BloomFiltersIndexer(engineContext, dataTableWriteConfig, 
dataTableMetaClient);
+      case COLUMN_STATS:
+        return new ColumnStatsIndexer(engineContext, dataTableWriteConfig, 
dataTableMetaClient);
+      case RECORD_INDEX:
+        return dataTableWriteConfig.isRecordLevelIndexEnabled()
+            ? new PartitionedRecordIndexer(engineContext, 
dataTableWriteConfig, dataTableMetaClient)
+            : new RecordIndexer(engineContext, dataTableWriteConfig, 
dataTableMetaClient);
+      case EXPRESSION_INDEX:
+        return new ExpressionIndexer(engineContext, dataTableWriteConfig, 
dataTableMetaClient, expressionIndexRecordGenerator);
+      case PARTITION_STATS:
+        return new PartitionStatsIndexer(engineContext, dataTableWriteConfig, 
dataTableMetaClient);
+      case SECONDARY_INDEX:
+        return new SecondaryIndexer(engineContext, dataTableWriteConfig, 
dataTableMetaClient);
+      default:
+        throw new HoodieNotSupportedException("Unsupported metadata partition 
type for indexing: " + partitionType);
+    }
+  }
+
+  /**
+   * Returns the map of metadata partition type to the indexer for the enabled 
metadata
+   * partition types based on the metadata config and table config.
+   */
+  public static Map<MetadataPartitionType, Indexer> getEnabledIndexerMap(
+      HoodieEngineContext engineContext,
+      HoodieWriteConfig dataTableWriteConfig,
+      HoodieTableMetaClient dataTableMetaClient,
+      ExpressionIndexRecordGenerator expressionIndexRecordGenerator) {
+    if (!dataTableWriteConfig.getMetadataConfig().isEnabled()) {
+      return Collections.emptyMap();
+    }
+    return 
Collections.unmodifiableMap(Arrays.stream(MetadataPartitionType.getValidValues(dataTableMetaClient.getTableConfig().getTableVersion()))
+        .filter(partitionType ->
+            
(partitionType.isMetadataPartitionEnabled(dataTableWriteConfig.getMetadataConfig(),
 dataTableMetaClient.getTableConfig())

Review Comment:
   PARTITION_STATS initialization does not depend on COLUMN_STATS having already been initialized: during bootstrap, `PartitionStatsIndexer.buildInitialization()` generates its records directly from the data table file slices.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to