codope commented on code in PR #11921:
URL: https://github.com/apache/hudi/pull/11921#discussion_r1774687269


##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java:
##########
@@ -914,6 +950,97 @@ private void validateAllColumnStats(
     validate(metadataBasedColStats, fsBasedColStats, partitionPath, "column stats");
   }
 
+  private void validatePartitionStats(
+      HoodieMetadataValidationContext metadataTableBasedContext,
+      Set<String> baseDataFilesForCleaning,
+      List<String> allPartitions) throws Exception {
+
+    HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(jsc);
+    HoodieData<HoodieMetadataColumnStats> partitionStatsUsingColStats = getPartitionStatsUsingColStats(metadataTableBasedContext,
+        baseDataFilesForCleaning, allPartitions, engineContext);
+
+    TableSchemaResolver schemaResolver = new TableSchemaResolver(metaClient);
+    boolean enableMetadataTable = true;
+    HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder()
+        .enable(enableMetadataTable)
+        .withMetadataIndexBloomFilter(enableMetadataTable)
+        .withMetadataIndexColumnStats(enableMetadataTable)
+        .withEnableRecordIndex(enableMetadataTable)
+        .build();
+    PartitionStatsIndexSupport partitionStatsIndexSupport =
+        new PartitionStatsIndexSupport(engineContext.getSqlContext().sparkSession(), AvroConversionUtils.convertAvroSchemaToStructType(schemaResolver.getTableAvroSchema()), metadataConfig, metaClient,
+            false);
+    HoodieData<HoodieMetadataColumnStats> partitionStats =
+        partitionStatsIndexSupport.loadColumnStatsIndexRecords(JavaConverters.asScalaBufferConverter(metadataTableBasedContext.allColumnNameList).asScala().toSeq(), false);
+    JavaRDD<HoodieMetadataColumnStats> diffRDD = HoodieJavaRDD.getJavaRDD(partitionStats).subtract(HoodieJavaRDD.getJavaRDD(partitionStatsUsingColStats));
+    if (!diffRDD.isEmpty()) {
+      List<HoodieMetadataColumnStats> diff = diffRDD.collect();
+      Set<String> partitionPaths = diff.stream().map(HoodieMetadataColumnStats::getFileName).collect(Collectors.toSet());
+      StringBuilder statDiffMsg = new StringBuilder();
+      for (String partitionPath : partitionPaths) {
+        List<HoodieMetadataColumnStats> diffPartitionStatsColStats = partitionStatsUsingColStats.filter(stat -> stat.getFileName().equals(partitionPath)).collectAsList();
+        List<HoodieMetadataColumnStats> diffPartitionStats = partitionStats.filter(stat -> stat.getFileName().equals(partitionPath)).collectAsList();
+        statDiffMsg.append(String.format("Partition stats from MDT: %s from colstats: %s", Arrays.toString(diffPartitionStats.toArray()), Arrays.toString(diffPartitionStatsColStats.toArray())));
+      }
+      throw new HoodieValidationException(String.format("Partition stats validation failed diff: %s", statDiffMsg));
+    }
+  }
+
+  private HoodieData<HoodieMetadataColumnStats> getPartitionStatsUsingColStats(HoodieMetadataValidationContext metadataTableBasedContext, Set<String> baseDataFilesForCleaning,
+                                                                               List<String> allPartitions, HoodieSparkEngineContext engineContext) {
+    return engineContext.parallelize(allPartitions).flatMap(partitionPath -> {
+      List<FileSlice> latestFileSlicesFromMetadataTable = filterFileSliceBasedOnInflightCleaning(metadataTableBasedContext.getSortedLatestFileSliceList(partitionPath),
+          baseDataFilesForCleaning);
+      List<String> latestFileNames = new ArrayList<>();
+      latestFileSlicesFromMetadataTable.stream().filter(fs -> fs.getBaseFile().isPresent()).forEach(fs -> {
+        latestFileNames.add(fs.getBaseFile().get().getFileName());
+        latestFileNames.addAll(fs.getLogFiles().map(HoodieLogFile::getFileName).collect(Collectors.toList()));
+      });
+      List<HoodieColumnRangeMetadata<Comparable>> colStats = metadataTableBasedContext
+          .getSortedColumnStatsList(partitionPath, latestFileNames);

Review Comment:
   Does this method take care of collecting stats from log files as well?



##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java:
##########
@@ -914,6 +950,97 @@ private void validateAllColumnStats(
     validate(metadataBasedColStats, fsBasedColStats, partitionPath, "column stats");
   }
 
+  private void validatePartitionStats(
+      HoodieMetadataValidationContext metadataTableBasedContext,
+      Set<String> baseDataFilesForCleaning,
+      List<String> allPartitions) throws Exception {

Review Comment:
   let's follow the usual style - if all args fit within 200 chars, then there is 
no need to break. If there is a need to break, keep the first arg on the same 
line as the method, and put each subsequent arg on a new line indented to the 
same position as the first arg. If you install our checkstyle in the IDE, it 
should automatically format that way.
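   
   A hypothetical sketch of the preferred wrapping, just to illustrate the rule:
   
   ```java
   // If the full signature fits in 200 chars, keep it on one line. Otherwise:
   // first arg stays on the method line, remaining args align under it.
   private void validatePartitionStats(HoodieMetadataValidationContext metadataTableBasedContext,
                                       Set<String> baseDataFilesForCleaning,
                                       List<String> allPartitions) throws Exception {
     // ...
   }
   ```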



##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java:
##########
@@ -914,6 +950,97 @@ private void validateAllColumnStats(
     validate(metadataBasedColStats, fsBasedColStats, partitionPath, "column stats");
   }
 
+  private void validatePartitionStats(
+      HoodieMetadataValidationContext metadataTableBasedContext,
+      Set<String> baseDataFilesForCleaning,
+      List<String> allPartitions) throws Exception {
+
+    HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(jsc);
+    HoodieData<HoodieMetadataColumnStats> partitionStatsUsingColStats = getPartitionStatsUsingColStats(metadataTableBasedContext,
+        baseDataFilesForCleaning, allPartitions, engineContext);
+
+    TableSchemaResolver schemaResolver = new TableSchemaResolver(metaClient);
+    boolean enableMetadataTable = true;
+    HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder()
+        .enable(enableMetadataTable)
+        .withMetadataIndexBloomFilter(enableMetadataTable)
+        .withMetadataIndexColumnStats(enableMetadataTable)
+        .withEnableRecordIndex(enableMetadataTable)
+        .build();
+    PartitionStatsIndexSupport partitionStatsIndexSupport =
+        new PartitionStatsIndexSupport(engineContext.getSqlContext().sparkSession(), AvroConversionUtils.convertAvroSchemaToStructType(schemaResolver.getTableAvroSchema()), metadataConfig, metaClient,
+            false);
+    HoodieData<HoodieMetadataColumnStats> partitionStats =
+        partitionStatsIndexSupport.loadColumnStatsIndexRecords(JavaConverters.asScalaBufferConverter(metadataTableBasedContext.allColumnNameList).asScala().toSeq(), false);
+    JavaRDD<HoodieMetadataColumnStats> diffRDD = HoodieJavaRDD.getJavaRDD(partitionStats).subtract(HoodieJavaRDD.getJavaRDD(partitionStatsUsingColStats));
+    if (!diffRDD.isEmpty()) {
+      List<HoodieMetadataColumnStats> diff = diffRDD.collect();
+      Set<String> partitionPaths = diff.stream().map(HoodieMetadataColumnStats::getFileName).collect(Collectors.toSet());
+      StringBuilder statDiffMsg = new StringBuilder();
+      for (String partitionPath : partitionPaths) {
+        List<HoodieMetadataColumnStats> diffPartitionStatsColStats = partitionStatsUsingColStats.filter(stat -> stat.getFileName().equals(partitionPath)).collectAsList();
+        List<HoodieMetadataColumnStats> diffPartitionStats = partitionStats.filter(stat -> stat.getFileName().equals(partitionPath)).collectAsList();
+        statDiffMsg.append(String.format("Partition stats from MDT: %s from colstats: %s", Arrays.toString(diffPartitionStats.toArray()), Arrays.toString(diffPartitionStatsColStats.toArray())));
+      }
+      throw new HoodieValidationException(String.format("Partition stats validation failed diff: %s", statDiffMsg));
+    }
+  }
+
+  private HoodieData<HoodieMetadataColumnStats> getPartitionStatsUsingColStats(HoodieMetadataValidationContext metadataTableBasedContext, Set<String> baseDataFilesForCleaning,
+                                                                               List<String> allPartitions, HoodieSparkEngineContext engineContext) {
+    return engineContext.parallelize(allPartitions).flatMap(partitionPath -> {
+      List<FileSlice> latestFileSlicesFromMetadataTable = filterFileSliceBasedOnInflightCleaning(metadataTableBasedContext.getSortedLatestFileSliceList(partitionPath),
+          baseDataFilesForCleaning);
+      List<String> latestFileNames = new ArrayList<>();
+      latestFileSlicesFromMetadataTable.stream().filter(fs -> fs.getBaseFile().isPresent()).forEach(fs -> {
+        latestFileNames.add(fs.getBaseFile().get().getFileName());
+        latestFileNames.addAll(fs.getLogFiles().map(HoodieLogFile::getFileName).collect(Collectors.toList()));
+      });
+      List<HoodieColumnRangeMetadata<Comparable>> colStats = metadataTableBasedContext
+          .getSortedColumnStatsList(partitionPath, latestFileNames);
+
+      TreeSet<HoodieColumnRangeMetadata<Comparable>> aggregatedColumnStats = aggregateColumnStats(partitionPath, colStats);
+      List<HoodieRecord> partitionStatRecords = HoodieMetadataPayload.createPartitionStatsRecords(partitionPath, new ArrayList<>(aggregatedColumnStats), false)
+          .collect(Collectors.toList());
+      return partitionStatRecords.stream()
+          .map(record -> {
+            try {
+              return ((HoodieMetadataPayload) record.getData()).getInsertValue(null, null)
+                  .map(metadataRecord -> ((HoodieMetadataRecord) metadataRecord).getColumnStatsMetadata());
+            } catch (IOException e) {
+              throw new RuntimeException(e);
+            }
+          })
+          .filter(Option::isPresent)
+          .map(Option::get)
+          .collect(Collectors.toList())
+          .iterator();
+    });
+  }
+
+  /**
+   * Generates aggregated column stats, which also serve as the partition stats for the given
+   * partition path.
+   *
+   * @param partitionPath Provided partition path
+   * @param colStats Column stat records for the partition
+   */
+  private static TreeSet<HoodieColumnRangeMetadata<Comparable>> aggregateColumnStats(String partitionPath, List<HoodieColumnRangeMetadata<Comparable>> colStats) {
+    TreeSet<HoodieColumnRangeMetadata<Comparable>> aggregatedColumnStats = new TreeSet<>(Comparator.comparing(HoodieColumnRangeMetadata::getColumnName));
+    for (HoodieColumnRangeMetadata<Comparable> colStat : colStats) {

Review Comment:
   Check if you can `reduce` colStats using `HoodieColumnRangeMetadata.merge`. 
Then we may not need this method.
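   
   A minimal sketch of that suggestion, assuming `HoodieColumnRangeMetadata.merge` 
combines two stats for the same column (the generic bounds may need an unchecked 
cast):
   
   ```java
   // Group stats by column name and reduce each group via merge; this would
   // replace the manual TreeSet-based aggregation.
   private static Collection<HoodieColumnRangeMetadata<Comparable>> aggregateColumnStats(
       List<HoodieColumnRangeMetadata<Comparable>> colStats) {
     return colStats.stream()
         .collect(Collectors.toMap(
             HoodieColumnRangeMetadata::getColumnName,  // key: column name
             Function.identity(),                       // value: the stat itself
             HoodieColumnRangeMetadata::merge))         // reduce collisions via merge
         .values();
   }
   ```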



##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java:
##########
@@ -914,6 +950,97 @@ private void validateAllColumnStats(
     validate(metadataBasedColStats, fsBasedColStats, partitionPath, "column stats");
   }
 
+  private void validatePartitionStats(
+      HoodieMetadataValidationContext metadataTableBasedContext,
+      Set<String> baseDataFilesForCleaning,
+      List<String> allPartitions) throws Exception {
+
+    HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(jsc);
+    HoodieData<HoodieMetadataColumnStats> partitionStatsUsingColStats = getPartitionStatsUsingColStats(metadataTableBasedContext,
+        baseDataFilesForCleaning, allPartitions, engineContext);
+
+    TableSchemaResolver schemaResolver = new TableSchemaResolver(metaClient);
+    boolean enableMetadataTable = true;
+    HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder()
+        .enable(enableMetadataTable)
+        .withMetadataIndexBloomFilter(enableMetadataTable)
+        .withMetadataIndexColumnStats(enableMetadataTable)
+        .withEnableRecordIndex(enableMetadataTable)
+        .build();
+    PartitionStatsIndexSupport partitionStatsIndexSupport =
+        new PartitionStatsIndexSupport(engineContext.getSqlContext().sparkSession(), AvroConversionUtils.convertAvroSchemaToStructType(schemaResolver.getTableAvroSchema()), metadataConfig, metaClient,
+            false);
+    HoodieData<HoodieMetadataColumnStats> partitionStats =
+        partitionStatsIndexSupport.loadColumnStatsIndexRecords(JavaConverters.asScalaBufferConverter(metadataTableBasedContext.allColumnNameList).asScala().toSeq(), false);
+    JavaRDD<HoodieMetadataColumnStats> diffRDD = HoodieJavaRDD.getJavaRDD(partitionStats).subtract(HoodieJavaRDD.getJavaRDD(partitionStatsUsingColStats));
+    if (!diffRDD.isEmpty()) {
+      List<HoodieMetadataColumnStats> diff = diffRDD.collect();
+      Set<String> partitionPaths = diff.stream().map(HoodieMetadataColumnStats::getFileName).collect(Collectors.toSet());
+      StringBuilder statDiffMsg = new StringBuilder();
+      for (String partitionPath : partitionPaths) {
+        List<HoodieMetadataColumnStats> diffPartitionStatsColStats = partitionStatsUsingColStats.filter(stat -> stat.getFileName().equals(partitionPath)).collectAsList();
+        List<HoodieMetadataColumnStats> diffPartitionStats = partitionStats.filter(stat -> stat.getFileName().equals(partitionPath)).collectAsList();
+        statDiffMsg.append(String.format("Partition stats from MDT: %s from colstats: %s", Arrays.toString(diffPartitionStats.toArray()), Arrays.toString(diffPartitionStatsColStats.toArray())));
+      }
+      throw new HoodieValidationException(String.format("Partition stats validation failed diff: %s", statDiffMsg));
+    }
+  }
+
+  private HoodieData<HoodieMetadataColumnStats> getPartitionStatsUsingColStats(HoodieMetadataValidationContext metadataTableBasedContext, Set<String> baseDataFilesForCleaning,

Review Comment:
   Should we rely on colstats for partition stats validation? What if colstats 
itself is wrong? Maybe we can compute colstats from scratch for the latest file 
slices. Since this is a validator, we can probably afford to incur higher 
overhead.
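   
   A hedged sketch of that idea for base files, assuming a Parquet footer utility 
along the lines of `ParquetUtils#readRangeFromParquetMetadata` (the exact name 
and signature vary across Hudi versions), with `basePath`, `hadoopConf`, and 
`columnList` as hypothetical locals:
   
   ```java
   // Recompute column ranges directly from base file footers so the validation
   // does not depend on the col stats partition of the MDT being correct.
   List<HoodieColumnRangeMetadata<Comparable>> freshColStats = new ArrayList<>();
   for (String fileName : latestFileNames) {
     Path filePath = new Path(basePath, partitionPath + "/" + fileName);  // hypothetical layout
     freshColStats.addAll(new ParquetUtils().readRangeFromParquetMetadata(hadoopConf, filePath, columnList));
   }
   // Log files would still need to be scanned separately for MOR tables.
   ```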



##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java:
##########
@@ -914,6 +950,97 @@ private void validateAllColumnStats(
     validate(metadataBasedColStats, fsBasedColStats, partitionPath, "column stats");
   }
 
+  private void validatePartitionStats(
+      HoodieMetadataValidationContext metadataTableBasedContext,
+      Set<String> baseDataFilesForCleaning,
+      List<String> allPartitions) throws Exception {
+
+    HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(jsc);
+    HoodieData<HoodieMetadataColumnStats> partitionStatsUsingColStats = getPartitionStatsUsingColStats(metadataTableBasedContext,
+        baseDataFilesForCleaning, allPartitions, engineContext);
+
+    TableSchemaResolver schemaResolver = new TableSchemaResolver(metaClient);

Review Comment:
   Please check if we need to extract the schema resolver as a member variable. 
I see it's getting instantiated in multiple places in the class.
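   
   For example (a sketch, assuming `metaClient` is initialized before first use):
   
   ```java
   // Instantiate once and reuse everywhere in the class.
   private final TableSchemaResolver schemaResolver = new TableSchemaResolver(metaClient);
   ```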



##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java:
##########
@@ -600,6 +634,8 @@ public boolean doMetadataTableValidation() {
         LOG.warn("Metadata table validation failed ({}).", taskLabels);
         return false;
       }
+    } catch (HoodieValidationException e) {
+      throw e;

Review Comment:
   Should we return false with a warn message?
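   
   Something along these lines (a sketch mirroring the existing warn-and-return 
path):
   
   ```java
   } catch (HoodieValidationException e) {
     // Warn and fail the validation run instead of propagating the exception.
     LOG.warn("Metadata table validation failed ({}).", taskLabels, e);
     return false;
   }
   ```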



##########
hudi-utilities/src/main/java/org/apache/hudi/utilities/HoodieMetadataTableValidator.java:
##########
@@ -914,6 +950,97 @@ private void validateAllColumnStats(
     validate(metadataBasedColStats, fsBasedColStats, partitionPath, "column stats");
   }
 
+  private void validatePartitionStats(
+      HoodieMetadataValidationContext metadataTableBasedContext,
+      Set<String> baseDataFilesForCleaning,
+      List<String> allPartitions) throws Exception {
+
+    HoodieSparkEngineContext engineContext = new HoodieSparkEngineContext(jsc);
+    HoodieData<HoodieMetadataColumnStats> partitionStatsUsingColStats = getPartitionStatsUsingColStats(metadataTableBasedContext,
+        baseDataFilesForCleaning, allPartitions, engineContext);
+
+    TableSchemaResolver schemaResolver = new TableSchemaResolver(metaClient);
+    boolean enableMetadataTable = true;
+    HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder()
+        .enable(enableMetadataTable)
+        .withMetadataIndexBloomFilter(enableMetadataTable)
+        .withMetadataIndexColumnStats(enableMetadataTable)
+        .withEnableRecordIndex(enableMetadataTable)
+        .build();

Review Comment:
   This should be part of `HoodieMetadataValidationContext`
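   
   A sketch of the refactor, with a hypothetical getter on the context:
   
   ```java
   // Inside HoodieMetadataValidationContext: build the config once and expose it.
   private final HoodieMetadataConfig metadataConfig = HoodieMetadataConfig.newBuilder()
       .enable(true)
       .withMetadataIndexBloomFilter(true)
       .withMetadataIndexColumnStats(true)
       .withEnableRecordIndex(true)
       .build();

   HoodieMetadataConfig getMetadataConfig() {
     return metadataConfig;
   }
   ```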



##########
hudi-utilities/src/test/java/org/apache/hudi/utilities/TestHoodieMetadataTableValidator.java:
##########
@@ -139,6 +139,50 @@ public void testMetadataTableValidation(String viewStorageTypeForFSListing, Stri
     assertTrue(validator.getThrowables().isEmpty());
   }
 
+  @Test
+  public void testPartitionStatsValidation() {
+    // TODO: Add validation for compaction and clustering cases

Review Comment:
   you can add it with a relaxed validation - the partition stats range should be 
the same as, or wider than, the range obtained by aggregating stats across all 
files.
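   
   A sketch of the relaxed assertion, where `partitionStatMin`/`partitionStatMax` 
come from the partition stats index and `aggregatedMin`/`aggregatedMax` from 
aggregating per-file stats (all four are hypothetical, already-unwrapped 
`Comparable` values):
   
   ```java
   // After compaction/clustering the partition stats range may legitimately be
   // wider than the freshly aggregated file-level range, so assert containment
   // rather than equality.
   boolean sameOrWider = partitionStatMin.compareTo(aggregatedMin) <= 0
       && partitionStatMax.compareTo(aggregatedMax) >= 0;
   assertTrue(sameOrWider, "Partition stats range should cover the aggregated file-level range");
   ```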



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
