ayushtkn commented on code in PR #5261:
URL: https://github.com/apache/hive/pull/5261#discussion_r1615322478
##########
iceberg/iceberg-handler/src/main/java/org/apache/iceberg/mr/hive/HiveIcebergStorageHandler.java:
##########
@@ -534,34 +564,24 @@ public boolean canProvideColStatistics(org.apache.hadoop.hive.ql.metadata.Table
   }
 
   private boolean canProvideColStats(Table table, long snapshotId) {
-    Path statsPath = getColStatsPath(table, snapshotId);
-    try {
-      FileSystem fs = statsPath.getFileSystem(conf);
-      return fs.exists(statsPath);
-    } catch (Exception e) {
-      LOG.warn("Exception when trying to find Iceberg column stats for table:{} , snapshot:{} , " +
-          "statsPath: {} , stack trace: {}", table.name(), table.currentSnapshot(), statsPath, e);
-    }
-    return false;
+    return IcebergTableUtil.getColStatsPath(table, snapshotId).isPresent();
   }
 
   @Override
   public List<ColumnStatisticsObj> getColStatistics(org.apache.hadoop.hive.ql.metadata.Table hmsTable) {
     Table table = IcebergTableUtil.getTable(conf, hmsTable.getTTable());
-    Path statsPath = getColStatsPath(table);
-    LOG.info("Using stats from puffin file at: {}", statsPath);
-    return readColStats(table, statsPath).getStatsObj();
+    return IcebergTableUtil.getColStatsPath(table).map(statsPath -> readColStats(table, statsPath))
+        .orElse(new ColumnStatistics()).getStatsObj();
   }
 
   private ColumnStatistics readColStats(Table table, Path statsPath) {
     try (PuffinReader reader = Puffin.read(table.io().newInputFile(statsPath.toString())).build()) {
       List<BlobMetadata> blobMetadata = reader.fileMetadata().blobs();
-      Map<BlobMetadata, ColumnStatistics> collect = Streams.stream(reader.readAll(blobMetadata)).collect(
-          Collectors.toMap(Pair::first, blobMetadataByteBufferPair -> SerializationUtils.deserialize(
-              ByteBuffers.toByteArray(blobMetadataByteBufferPair.second()))));
-      return collect.get(blobMetadata.get(0));
-    } catch (IOException | IndexOutOfBoundsException e) {
-      LOG.warn(" Unable to read iceberg col stats from puffin files: ", e);
+      byte[] byteBuffer = ByteBuffers.toByteArray(reader.readAll(blobMetadata).iterator().next().second());
Review Comment:
We didn't do hasNext() here? Can this lead to an exception if someone tries to read the file before it was completely written, as in the case mentioned in the description?
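
For illustration, a minimal sketch of the guard this question is pointing at, assuming `readColStats` keeps the shape shown in the diff; the `hasNext()` check and the empty `ColumnStatistics` fallback are assumptions about one possible fix, not the code in this PR:

```java
// Hypothetical sketch, not the PR's code. Assumes the class context from the
// diff above (Puffin, PuffinReader, BlobMetadata, Pair, ByteBuffer, ByteBuffers,
// SerializationUtils, ColumnStatistics, LOG already imported/available).
private ColumnStatistics readColStats(Table table, Path statsPath) {
  try (PuffinReader reader = Puffin.read(table.io().newInputFile(statsPath.toString())).build()) {
    List<BlobMetadata> blobMetadata = reader.fileMetadata().blobs();
    Iterator<Pair<BlobMetadata, ByteBuffer>> blobs = reader.readAll(blobMetadata).iterator();
    if (!blobs.hasNext()) {
      // Puffin file exists but holds no readable blob yet, e.g. it is still being written.
      return new ColumnStatistics();
    }
    byte[] bytes = ByteBuffers.toByteArray(blobs.next().second());
    return SerializationUtils.deserialize(bytes);
  } catch (IOException e) {
    LOG.warn("Unable to read iceberg col stats from puffin files: ", e);
    return new ColumnStatistics();
  }
}
```

With such a guard, a reader racing a writer would fall back to empty statistics instead of throwing NoSuchElementException from iterator().next().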
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]