zhangbutao commented on code in PR #5261:
URL: https://github.com/apache/hive/pull/5261#discussion_r1612941900


##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java:
##########
@@ -498,30 +501,58 @@ public boolean canSetColStatistics(org.apache.hadoop.hive.ql.metadata.Table hmsT
   @Override
   public boolean setColStatistics(org.apache.hadoop.hive.ql.metadata.Table hmsTable, List<ColumnStatistics> colStats) {
     Table tbl = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
-    String snapshotId = String.format("%s-STATS-%d", tbl.name(), tbl.currentSnapshot().snapshotId());
-    return writeColStats(colStats.get(0), tbl, snapshotId);
+    return writeColStats(colStats.get(0), tbl);
   }
 
-  private boolean writeColStats(ColumnStatistics tableColStats, Table tbl, String snapshotId) {
+  private boolean writeColStats(ColumnStatistics tableColStats, Table tbl) {
     try {
-      boolean rewriteStats = removeColStatsIfExists(tbl);
-      if (!rewriteStats) {
+      if (!shouldRewriteColStats(tbl)) {
         checkAndMergeColStats(tableColStats, tbl);
       }
       // Currently, we are only serializing table level stats.
       byte[] serializeColStats = SerializationUtils.serialize(tableColStats);
-      try (PuffinWriter writer = Puffin.write(tbl.io().newOutputFile(getColStatsPath(tbl).toString()))
+      StatisticsFile statisticsFile;
+      String statsPath = tbl.location() + STATS + UUID.randomUUID();
+
+      try (PuffinWriter puffinWriter = Puffin.write(tbl.io().newOutputFile(statsPath))
           .createdBy(Constants.HIVE_ENGINE).build()) {
-        writer.add(new Blob(tbl.name() + "-" + snapshotId, ImmutableList.of(1), tbl.currentSnapshot().snapshotId(),
-            tbl.currentSnapshot().sequenceNumber(), ByteBuffer.wrap(serializeColStats), PuffinCompressionCodec.NONE,
-            ImmutableMap.of()));
-        writer.finish();
-        return true;
+        long snapshotId = tbl.currentSnapshot().snapshotId();
+        long snapshotSequenceNumber = tbl.currentSnapshot().sequenceNumber();
+        puffinWriter.add(
+            new Blob(
+              ColumnStatisticsObj.class.getSimpleName(),
+              ImmutableList.of(1),
+              snapshotId,
+              snapshotSequenceNumber,
+              ByteBuffer.wrap(serializeColStats),
+              PuffinCompressionCodec.NONE,

Review Comment:
   We could add `PuffinCompressionCodec.ZSTD` in future work, so that the stats file doesn't consume as much storage space.
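   
   For reference, a minimal sketch of what that follow-up could look like (same `Blob` as in this diff, only the codec argument changes; all names are taken from the surrounding code):
   
   ```java
   // Request ZSTD compression for the serialized stats so the Puffin file
   // takes less space on storage; readers pick the codec up from the
   // blob's metadata when decompressing.
   puffinWriter.add(
       new Blob(
           ColumnStatisticsObj.class.getSimpleName(),
           ImmutableList.of(1),
           snapshotId,
           snapshotSequenceNumber,
           ByteBuffer.wrap(serializeColStats),
           PuffinCompressionCodec.ZSTD,  // was PuffinCompressionCodec.NONE
           ImmutableMap.of()));
   ```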



##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java:
##########
@@ -498,30 +501,58 @@ public boolean canSetColStatistics(org.apache.hadoop.hive.ql.metadata.Table hmsT
   @Override
   public boolean setColStatistics(org.apache.hadoop.hive.ql.metadata.Table hmsTable, List<ColumnStatistics> colStats) {
     Table tbl = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
-    String snapshotId = String.format("%s-STATS-%d", tbl.name(), tbl.currentSnapshot().snapshotId());
-    return writeColStats(colStats.get(0), tbl, snapshotId);
+    return writeColStats(colStats.get(0), tbl);
   }
 
-  private boolean writeColStats(ColumnStatistics tableColStats, Table tbl, String snapshotId) {
+  private boolean writeColStats(ColumnStatistics tableColStats, Table tbl) {
     try {
-      boolean rewriteStats = removeColStatsIfExists(tbl);
-      if (!rewriteStats) {
+      if (!shouldRewriteColStats(tbl)) {
         checkAndMergeColStats(tableColStats, tbl);
       }
       // Currently, we are only serializing table level stats.
       byte[] serializeColStats = SerializationUtils.serialize(tableColStats);
-      try (PuffinWriter writer = Puffin.write(tbl.io().newOutputFile(getColStatsPath(tbl).toString()))
+      StatisticsFile statisticsFile;
+      String statsPath = tbl.location() + STATS + UUID.randomUUID();

Review Comment:
   Can we also add the snapshotId to the statsPath? Something like `String statsPath = tbl.location() + STATS + snapshotId + "-" + UUID.randomUUID()`.
   
   Sometimes I want to inspect the stats Puffin file in the underlying storage path manually, and if I already know the snapshotId, I can tell which Puffin file to check.
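   
   A rough sketch of the suggested path construction (assuming the snapshot id is read from `tbl.currentSnapshot()` before the path is built):
   
   ```java
   // Hypothetical layout: <table location><STATS><snapshotId>-<uuid>, so the
   // Puffin file for a known snapshot can be spotted directly in storage.
   long snapshotId = tbl.currentSnapshot().snapshotId();
   String statsPath = tbl.location() + STATS + snapshotId + "-" + UUID.randomUUID();
   ```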
   
   



##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java:
##########
@@ -534,10 +565,13 @@ public boolean canProvideColStatistics(org.apache.hadoop.hive.ql.metadata.Table
   }
 
   private boolean canProvideColStats(Table table, long snapshotId) {
-    Path statsPath = getColStatsPath(table, snapshotId);
+    Optional<Path> statsPath = IcebergTableUtil.getColStatsPath(table, snapshotId);
+    if (!statsPath.isPresent()) {
+      return false;
+    }
     try {
-      FileSystem fs = statsPath.getFileSystem(conf);
-      return  fs.exists(statsPath);
+      FileSystem fs = statsPath.get().getFileSystem(conf);

Review Comment:
   Do we still need the filesystem check to confirm that the stats file exists?
   
   I think the result of `IcebergTableUtil::getColStatsPath` is enough.
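   
   Something like this, if we trust the statistics metadata committed on the table (a sketch, not tested):
   
   ```java
   private boolean canProvideColStats(Table table, long snapshotId) {
     // getColStatsPath resolves the path from the table's registered statistics
     // files, so a present Optional already implies a stats file was committed.
     return IcebergTableUtil.getColStatsPath(table, snapshotId).isPresent();
   }
   ```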



##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java:
##########
@@ -498,30 +501,58 @@ public boolean canSetColStatistics(org.apache.hadoop.hive.ql.metadata.Table hmsT
   @Override
   public boolean setColStatistics(org.apache.hadoop.hive.ql.metadata.Table hmsTable, List<ColumnStatistics> colStats) {
     Table tbl = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
-    String snapshotId = String.format("%s-STATS-%d", tbl.name(), tbl.currentSnapshot().snapshotId());
-    return writeColStats(colStats.get(0), tbl, snapshotId);
+    return writeColStats(colStats.get(0), tbl);
   }
 
-  private boolean writeColStats(ColumnStatistics tableColStats, Table tbl, String snapshotId) {
+  private boolean writeColStats(ColumnStatistics tableColStats, Table tbl) {
     try {
-      boolean rewriteStats = removeColStatsIfExists(tbl);
-      if (!rewriteStats) {
+      if (!shouldRewriteColStats(tbl)) {
         checkAndMergeColStats(tableColStats, tbl);
       }
       // Currently, we are only serializing table level stats.
       byte[] serializeColStats = SerializationUtils.serialize(tableColStats);
-      try (PuffinWriter writer = Puffin.write(tbl.io().newOutputFile(getColStatsPath(tbl).toString()))
+      StatisticsFile statisticsFile;
+      String statsPath = tbl.location() + STATS + UUID.randomUUID();
+
+      try (PuffinWriter puffinWriter = Puffin.write(tbl.io().newOutputFile(statsPath))
          .createdBy(Constants.HIVE_ENGINE).build()) {
-        writer.add(new Blob(tbl.name() + "-" + snapshotId, ImmutableList.of(1), tbl.currentSnapshot().snapshotId(),
-            tbl.currentSnapshot().sequenceNumber(), ByteBuffer.wrap(serializeColStats), PuffinCompressionCodec.NONE,
-            ImmutableMap.of()));
-        writer.finish();
-        return true;
+        long snapshotId = tbl.currentSnapshot().snapshotId();
+        long snapshotSequenceNumber = tbl.currentSnapshot().sequenceNumber();
+        puffinWriter.add(
+            new Blob(
+              ColumnStatisticsObj.class.getSimpleName(),
+              ImmutableList.of(1),
+              snapshotId,
+              snapshotSequenceNumber,
+              ByteBuffer.wrap(serializeColStats),
+              PuffinCompressionCodec.NONE,
+              ImmutableMap.of()
+          ));
+        puffinWriter.finish();
+
+        statisticsFile =
+          new GenericStatisticsFile(
+            snapshotId,
+            statsPath,
+            puffinWriter.fileSize(),
+            puffinWriter.footerSize(),
+            puffinWriter.writtenBlobsMetadata().stream()
+              .map(GenericBlobMetadata::from)
+              .collect(ImmutableList.toImmutableList())
+          );
       } catch (IOException e) {
         LOG.warn("Unable to write stats to puffin file {}", e.getMessage());
         return false;
       }
-    } catch (InvalidObjectException | IOException e) {
+      Transaction transaction = tbl.newTransaction();
+      transaction
+          .updateStatistics()
+          .setStatistics(statisticsFile.snapshotId(), statisticsFile)
+          .commit();
+      transaction.commitTransaction();
+      return true;
+
+    } catch (InvalidObjectException e) {

Review Comment:
   Should we use `Exception` instead of `InvalidObjectException`? I think any stats-related exception should not interrupt the SQL job.
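   
   For example, widening the catch so stats persistence stays best-effort (a sketch; the log message is illustrative):
   
   ```java
   } catch (Exception e) {
     // Stats are advisory: log the failure and report "not written" instead
     // of propagating and failing the user's SQL job over a stats-only issue.
     LOG.warn("Unable to persist column statistics: {}", e.getMessage());
     return false;
   }
   ```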


