lokeshj1703 commented on code in PR #12558:
URL: https://github.com/apache/hudi/pull/12558#discussion_r1908717943
##########
hudi-client/hudi-spark-client/src/main/java/org/apache/hudi/client/utils/SparkMetadataWriterUtils.java:
##########
@@ -226,4 +278,155 @@ private static List<Row> toRows(List<HoodieRecord> records, Schema schema, Hoodi
         .collect(Collectors.toList());
     return avroRecords;
   }
+
+  /**
+   * Generates expression index records.
+   *
+   * @param partitionFilePathAndSizeTriplet Triplet of partition name, file path and file size for each file to index
+   * @param indexDefinition Hoodie index definition for the expression index for which records need to be generated
+   * @param metaClient Hoodie table meta client
+   * @param parallelism Parallelism to use for engine operations
+   * @param readerSchema Schema of the reader
+   * @param instantTime Instant time
+   * @param engineContext HoodieEngineContext
+   * @param dataWriteConfig Write config for the data table
+   * @param metadataWriteConfig Write config for the metadata table
+   * @param partitionRecordsFunctionOpt Function used to generate partition stat records for the expression index. It takes the column range metadata generated
+   *                                    for the provided partition files as input and uses those to generate the final partition stats
+   * @return ExpressionIndexComputationMetadata containing the expression index column stat records, along with partition stat records if partitionRecordsFunctionOpt is provided
+   */
+  @SuppressWarnings("checkstyle:LineLength")
+  public static ExpressionIndexComputationMetadata getExprIndexRecords(List<Pair<String, Pair<String, Long>>> partitionFilePathAndSizeTriplet, HoodieIndexDefinition indexDefinition,
+                                                                       HoodieTableMetaClient metaClient, int parallelism, Schema readerSchema, String instantTime,
+                                                                       HoodieEngineContext engineContext, HoodieWriteConfig dataWriteConfig, HoodieWriteConfig metadataWriteConfig,
+                                                                       Option<Function<HoodiePairData<String, HoodieColumnRangeMetadata<Comparable>>, HoodieData<HoodieRecord>>> partitionRecordsFunctionOpt) {
+    HoodieSparkEngineContext sparkEngineContext = (HoodieSparkEngineContext) engineContext;
+    if (indexDefinition.getSourceFields().isEmpty()) {
+      // In case there are no columns to index, bail
+      return new ExpressionIndexComputationMetadata(sparkEngineContext.emptyHoodieData());
+    }
+
+    // NOTE: We are assuming that the index expression is operating on a single column.
+    // HUDI-6994 will address this.
+    ValidationUtils.checkArgument(indexDefinition.getSourceFields().size() == 1, "Only one source field is supported for expression index");
+    String columnToIndex = indexDefinition.getSourceFields().get(0);
+    SQLContext sqlContext = sparkEngineContext.getSqlContext();
+
+    // Read records and append expression index metadata to every row
+    HoodieData<Row> rowData = sparkEngineContext.parallelize(partitionFilePathAndSizeTriplet, parallelism)
+        .flatMap((SerializableFunction<Pair<String, Pair<String, Long>>, Iterator<Row>>) entry ->
+            getExpressionIndexRecordsIterator(metaClient, readerSchema, dataWriteConfig, entry, sqlContext));
+
+    // Generate dataset with expression index metadata
+    StructType structType = AvroConversionUtils.convertAvroSchemaToStructType(readerSchema)
+        .add(StructField.apply(HoodieExpressionIndex.HOODIE_EXPRESSION_INDEX_PARTITION, DataTypes.StringType, false, Metadata.empty()))
+        .add(StructField.apply(HoodieExpressionIndex.HOODIE_EXPRESSION_INDEX_RELATIVE_FILE_PATH, DataTypes.StringType, false, Metadata.empty()))
+        .add(StructField.apply(HoodieExpressionIndex.HOODIE_EXPRESSION_INDEX_FILE_SIZE, DataTypes.LongType, false, Metadata.empty()));
+    Dataset<Row> rowDataset = sparkEngineContext.getSqlContext().createDataFrame(HoodieJavaRDD.getJavaRDD(rowData).rdd(), structType);
+
+    // Apply expression index and generate the column to index
+    HoodieExpressionIndex<Column, Column> expressionIndex =
+        new HoodieSparkExpressionIndex(indexDefinition.getIndexName(), indexDefinition.getIndexFunction(), indexDefinition.getSourceFields(), indexDefinition.getIndexOptions());
+    Column indexedColumn = expressionIndex.apply(Collections.singletonList(rowDataset.col(columnToIndex)));
+    rowDataset = rowDataset.withColumn(columnToIndex, indexedColumn);
+
+    // Generate expression index records
+    if (indexDefinition.getIndexType().equalsIgnoreCase(PARTITION_NAME_COLUMN_STATS)) {
+      return getExpressionIndexRecordsUsingColumnStats(rowDataset, expressionIndex, columnToIndex, partitionRecordsFunctionOpt);
+    } else if (indexDefinition.getIndexType().equalsIgnoreCase(PARTITION_NAME_BLOOM_FILTERS)) {
+      return getExpressionIndexRecordsUsingBloomFilter(rowDataset, columnToIndex, metadataWriteConfig, instantTime, indexDefinition.getIndexName());
+    } else {
+      throw new UnsupportedOperationException(indexDefinition.getIndexType() + " is not yet supported");
+    }
+  }
+
+  private static Iterator<Row> getExpressionIndexRecordsIterator(HoodieTableMetaClient metaClient, Schema readerSchema, HoodieWriteConfig dataWriteConfig, Pair<String, Pair<String, Long>> entry,
+                                                                 SQLContext sqlContext) {
+    String partition = entry.getKey();
+    Pair<String, Long> filePathSizePair = entry.getValue();
+    String filePath = filePathSizePair.getKey();
+    String relativeFilePath = FSUtils.getRelativePartitionPath(metaClient.getBasePath(), new StoragePath(filePath));
+    long fileSize = filePathSizePair.getValue();
+    List<Row> rowsForFilePath = readRecordsAsRows(new StoragePath[] {new StoragePath(filePath)}, sqlContext, metaClient, readerSchema, dataWriteConfig,
+        FSUtils.isBaseFile(new StoragePath(filePath.substring(filePath.lastIndexOf("/") + 1))));
+    List<Row> rowsWithIndexMetadata = getRowsWithExpressionIndexMetadata(rowsForFilePath, partition, relativeFilePath, fileSize);
+    return rowsWithIndexMetadata.iterator();
+  }
+
+  /**
+   * Fetches column range metadata from the expression index partition for all the partition files impacted by the commit. This only takes into account completed commits
+   * for the partitions, since the expression index updates have not yet been committed.
+   *
+   * @param commitMetadata Hoodie commit metadata
+   * @param indexPartition Partition name for the expression index
+   * @param engineContext Hoodie engine context
+   * @param tableMetadata Hoodie table metadata
+   * @param dataMetaClient Data table meta client
+   * @param metadataConfig Hoodie metadata config
+   * @return HoodiePairData of partition name to the list of column range metadata for that partition
+   */
+  public static HoodiePairData<String, List<HoodieColumnRangeMetadata<Comparable>>> getExpressionIndexPartitionStatUpdates(HoodieCommitMetadata commitMetadata, String indexPartition,
+                                                                                                                           HoodieEngineContext engineContext, HoodieTableMetadata tableMetadata,
+                                                                                                                           HoodieTableMetaClient dataMetaClient, HoodieMetadataConfig metadataConfig) {
+    List<HoodieWriteStat> allWriteStats = commitMetadata.getPartitionToWriteStats().values().stream()
+        .flatMap(Collection::stream).collect(Collectors.toList());
+    if (allWriteStats.isEmpty()) {
+      return engineContext.emptyHoodieData().mapToPair(o -> Pair.of("", new ArrayList<>()));
+    }
+
+    HoodieIndexDefinition indexDefinition = HoodieTableMetadataUtil.getHoodieIndexDefinition(indexPartition, dataMetaClient);
+    List<String> columnsToIndex = Collections.singletonList(indexDefinition.getSourceFields().get(0));
+    try {
+      Option<Schema> writerSchema =
+          Option.ofNullable(commitMetadata.getMetadata(HoodieCommitMetadata.SCHEMA_KEY))
+              .flatMap(writerSchemaStr ->
+                  isNullOrEmpty(writerSchemaStr)
+                      ? Option.empty()
+                      : Option.of(new Schema.Parser().parse(writerSchemaStr)));
+      HoodieTableConfig tableConfig = dataMetaClient.getTableConfig();
+      Schema tableSchema = writerSchema.map(schema -> tableConfig.populateMetaFields() ? addMetadataFields(schema) : schema)
+          .orElseThrow(() -> new IllegalStateException(String.format("Expected writer schema in commit metadata %s", commitMetadata)));
+      // filter columns with only supported types
+      final List<String> validColumnsToIndex = columnsToIndex.stream()
+          .filter(col -> HoodieTableMetadataUtil.SUPPORTED_META_FIELDS_PARTITION_STATS.contains(col) || HoodieTableMetadataUtil.validateDataTypeForPartitionStats(col, tableSchema))
+          .collect(Collectors.toList());
+      if (validColumnsToIndex.isEmpty()) {
+        return engineContext.emptyHoodieData().mapToPair(o -> Pair.of("", new ArrayList<>()));
+      }
+      LOG.debug("Indexing following columns for partition stats index: {}", validColumnsToIndex);
+      // Group by partitionPath and then gather write stats lists,
+      // where each inner list contains HoodieWriteStat objects that have the same partitionPath.
+      List<List<HoodieWriteStat>> partitionedWriteStats = new ArrayList<>(allWriteStats.stream()
+          .collect(Collectors.groupingBy(HoodieWriteStat::getPartitionPath))
Review Comment:
   Added a test for it. Yes, it is tracked as an empty string.
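
   Stepping back to the new API in the snippet above: partitionRecordsFunctionOpt is what lets the expression index reuse the partition stats conversion added below in HoodieTableMetadataUtil. A minimal caller-side sketch (hypothetical wiring, not verbatim from this PR; the indexPartition variable and the false tight-bound flag are purely illustrative):

       // Hypothetical: fold the per-file column ranges produced for the expression
       // index into partition stat records via the new pair-typed overload of
       // convertMetadataToPartitionStatsRecords. Option.of(indexPartition) tags the
       // records with the expression index partition; the boolean is the tight-bound
       // flag, shown as false purely for illustration.
       Option<Function<HoodiePairData<String, HoodieColumnRangeMetadata<Comparable>>, HoodieData<HoodieRecord>>> partitionRecordsFunctionOpt =
           Option.of(columnRangeMetadata ->
               HoodieTableMetadataUtil.convertMetadataToPartitionStatsRecords(
                   columnRangeMetadata, Option.of(indexPartition), false));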
##########
hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java:
##########
@@ -397,7 +395,10 @@ public static Map<String, HoodieData<HoodieRecord>> convertMetadataToRecords(Hoo
     if (enabledPartitionTypes.contains(MetadataPartitionType.PARTITION_STATS)) {
       checkState(MetadataPartitionType.COLUMN_STATS.isMetadataPartitionAvailable(dataMetaClient),
           "Column stats partition must be enabled to generate partition stats. Please enable: " + HoodieMetadataConfig.ENABLE_METADATA_INDEX_COLUMN_STATS.key());
-      final HoodieData<HoodieRecord> partitionStatsRDD = convertMetadataToPartitionStatsRecords(commitMetadata, context, dataMetaClient, metadataConfig);
+      // Generate Hoodie pair data of partition name and list of column range metadata for all the files in that partition
+      HoodiePairData<String, List<HoodieColumnRangeMetadata<Comparable>>> columnRangeMetadata = convertMetadataToPartitionStatsColumnRangeMetadata(commitMetadata, context,
+          dataMetaClient, tableMetadata, metadataConfig);
+      final HoodieData<HoodieRecord> partitionStatsRDD = convertMetadataToPartitionStatsRecords(columnRangeMetadata, dataMetaClient);
Review Comment:
Addressed
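
   For context on the two-step split above: convertMetadataToPartitionStatsColumnRangeMetadata produces partition name -> list of column range metadata, and the record conversion first flattens that pair data before re-grouping it per partition (see the flatMapValues(List::iterator) call in the next snippet). A toy, self-contained illustration of that shape change in plain Java streams (illustrative only, not Hudi code):

       import java.util.Arrays;
       import java.util.HashMap;
       import java.util.List;
       import java.util.Map;
       import java.util.stream.Collectors;

       class FlattenShapeDemo {
         public static void main(String[] args) {
           // ("p1", [r1, r2]) flattens to ("p1", r1), ("p1", r2); a later
           // groupByKey-style step can then re-collect the entries per partition.
           Map<String, List<String>> byPartition = new HashMap<>();
           byPartition.put("p1", Arrays.asList("r1", "r2"));
           List<Map.Entry<String, String>> flattened = byPartition.entrySet().stream()
               .flatMap(e -> e.getValue().stream().map(v -> Map.entry(e.getKey(), v)))
               .collect(Collectors.toList());
           System.out.println(flattened); // [p1=r1, p1=r2]
         }
       }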
##########
hudi-common/src/main/java/org/apache/hudi/metadata/HoodieTableMetadataUtil.java:
##########
@@ -2499,14 +2535,36 @@ private static List<HoodieColumnRangeMetadata<Comparable>> getFileStatsRangeMeta
     return readColumnRangeMetadataFrom(partitionPath, fileName, datasetMetaClient, columnsToIndex, maxBufferSize);
   }
-  public static HoodieData<HoodieRecord> convertMetadataToPartitionStatsRecords(HoodieCommitMetadata commitMetadata,
-                                                                                HoodieEngineContext engineContext,
-                                                                                HoodieTableMetaClient dataMetaClient,
-                                                                                HoodieMetadataConfig metadataConfig) {
+  public static HoodieData<HoodieRecord> convertMetadataToPartitionStatsRecords(HoodiePairData<String, List<HoodieColumnRangeMetadata<Comparable>>> columnRangeMetadataPartitionPair,
+                                                                                HoodieTableMetaClient dataMetaClient) {
+    return convertMetadataToPartitionStatsRecords(columnRangeMetadataPartitionPair.flatMapValues(List::iterator),
+        Option.empty(), isShouldScanColStatsForTightBound(dataMetaClient));
+  }
+
+  public static HoodieData<HoodieRecord> convertMetadataToPartitionStatsRecords(HoodiePairData<String, HoodieColumnRangeMetadata<Comparable>> columnRangeMetadataPartitionPair,
+                                                                                Option<String> indexPartitionOpt, boolean isTightBound) {
+    try {
+      return columnRangeMetadataPartitionPair
+          .groupByKey()
+          .map(pair -> {
+            final String partitionName = pair.getLeft();
+            return collectAndProcessColumnMetadata(pair.getRight(), partitionName, isTightBound, indexPartitionOpt);
+          })
+          .flatMap(recordStream -> recordStream.iterator());
+    } catch (Exception e) {
+      throw new HoodieException("Failed to generate column stats records for metadata table", e);
+    }
+  }
+
+  public static HoodiePairData<String, List<HoodieColumnRangeMetadata<Comparable>>> convertMetadataToPartitionStatsColumnRangeMetadata(HoodieCommitMetadata commitMetadata,
+                                                                                                                                       HoodieEngineContext engineContext,
+                                                                                                                                       HoodieTableMetaClient dataMetaClient,
+                                                                                                                                       HoodieTableMetadata tableMetadata,
+                                                                                                                                       HoodieMetadataConfig metadataConfig) {
     List<HoodieWriteStat> allWriteStats = commitMetadata.getPartitionToWriteStats().values().stream()
        .flatMap(Collection::stream).collect(Collectors.toList());
     if (allWriteStats.isEmpty()) {
-      return engineContext.emptyHoodieData();
+      return engineContext.emptyHoodieData().mapToPair(o -> Pair.of("", new ArrayList<>()));
Review Comment:
   After the fusion, the mapping is removed.
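
   To unpack the reply above: the mapToPair lambda on the empty-input path never actually executes; it exists only to coerce the empty HoodieData into the pair-typed return signature, which is why it can be dropped once the transformations are fused. A toy demonstration of the same point in plain Java (illustrative only, not Hudi code):

       import java.util.Collections;
       import java.util.List;
       import java.util.Map;
       import java.util.stream.Collectors;

       class EmptyMappingDemo {
         public static void main(String[] args) {
           List<Object> empty = Collections.emptyList();
           // The mapping function below is never invoked for an empty input;
           // it only fixes the element type of the resulting list.
           List<Map.Entry<String, List<Object>>> pairs = empty.stream()
               .map(o -> Map.entry("", Collections.<Object>emptyList()))
               .collect(Collectors.toList());
           System.out.println(pairs.isEmpty()); // true
         }
       }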
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at: [email protected]