kbendick commented on a change in pull request #3213: URL: https://github.com/apache/iceberg/pull/3213#discussion_r721874293
########## File path: core/src/main/java/org/apache/iceberg/util/SmallFileUtil.java ########## @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.util; + +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.apache.iceberg.DataFile; +import org.apache.iceberg.FileScanTask; +import org.apache.iceberg.Table; +import org.apache.iceberg.TableProperties; +import org.apache.iceberg.io.CloseableIterable; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SmallFileUtil { + private static final Logger LOG = LoggerFactory.getLogger(SmallFileUtil.class); + + private SmallFileUtil() { + + } + + /** + * Returns whether small files should be merged. + */ + public static boolean shouldMergeSmallFiles(Table table) { Review comment: Presently, users can bin-pack data using Spark. I believe it uses a threshold on the lower bound that's not a simple check if the file is smaller than the small file size, but some multiple of it. 
It would be great if the continuous Flink action could compact files in a way that would return results that wouldn't be further changed by calling the Spark bin-packing action. I'd love to hear others' opinions on this though 🙂 ########## File path: core/src/main/java/org/apache/iceberg/util/SmallFileUtil.java ########## @@ -0,0 +1,145 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.apache.iceberg.util; + +import java.util.Date; +import java.util.List; +import java.util.Map; +import java.util.stream.Collectors; +import org.apache.iceberg.DataFile; +import org.apache.iceberg.FileScanTask; +import org.apache.iceberg.Table; +import org.apache.iceberg.TableProperties; +import org.apache.iceberg.io.CloseableIterable; +import org.apache.iceberg.relocated.com.google.common.collect.Lists; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SmallFileUtil { + private static final Logger LOG = LoggerFactory.getLogger(SmallFileUtil.class); + + private SmallFileUtil() { + + } + + /** + * Returns whether small files should be merged. 
+ */ + public static boolean shouldMergeSmallFiles(Table table) { + boolean shouldMerge = false; + + long currentTimeMillis = System.currentTimeMillis(); + Map<String, String> props = table.properties(); + boolean autoMergeEnable = PropertyUtil.propertyAsBoolean( + props, + TableProperties.WRITE_AUTO_COMPACT_ENABLED, + TableProperties.WRITE_AUTO_COMPACT_ENABLED_DEFAULT); + + long mergeIntervalMillis = PropertyUtil.propertyAsLong( + props, + TableProperties.WRITE_COMPACT_INTERVAL_MS, + TableProperties.WRITE_COMPACT_INTERVAL_MS_DEFAULT); + + long lastCommittedTimestamp = PropertyUtil.propertyAsLong( + props, + TableProperties.WRITE_COMPACT_LAST_REWRITE_MS, + TableProperties.WRITE_COMPACT_LAST_REWRITE_MS_DEFAULT); + + long smallFileThreshold = PropertyUtil.propertyAsLong( + props, + TableProperties.WRITE_COMPACT_SMALL_FILE_NUMS, + TableProperties.WRITE_COMPACT_SMALL_FILE_NUMS_DEFAULT); + + LOG.info("Summary: actual compact interval: {}s, compact.auto.enabled: {}, " + + "compact.interval: {}s, current time: {}, last compact time: {}, small-file-nums: {}", + (currentTimeMillis - lastCommittedTimestamp) / 1000, + autoMergeEnable, + mergeIntervalMillis / 1000, + new Date(currentTimeMillis), + new Date(lastCommittedTimestamp), + smallFileThreshold); + + if (autoMergeEnable && (currentTimeMillis - lastCommittedTimestamp >= mergeIntervalMillis || + smallFileNums(table) >= smallFileThreshold)) { + LOG.info("Should compact small files"); + shouldMerge = true; + } + return shouldMerge; + } + + /** + * Returns the number of small files in current snapshot. 
+ */ + public static long smallFileNums(Table table) { + Map<String, String> props = table.properties(); + long smallFileSize = PropertyUtil.propertyAsLong( + props, + TableProperties.WRITE_COMPACT_SMALL_FILE_SIZE_BYTES, + TableProperties.WRITE_COMPACT_SMALL_FILE_SIZE_BYTES_DEFAULT); + + CloseableIterable<FileScanTask> tasks = table.newScan().ignoreResiduals().planFiles(); + List<DataFile> dataFiles = Lists.newArrayList(CloseableIterable.transform(tasks, FileScanTask::file)); + List<DataFile> filterDataFiles = dataFiles + .stream() + .filter(file -> file.fileSizeInBytes() < smallFileSize) + .collect(Collectors.toList()); + LOG.info("small-file-size-bytes: {}, actual small files numbers: {}", smallFileSize, filterDataFiles.size()); + + return filterDataFiles.size(); + } + + /** + * Returns whether expire snapshot files should be removed. + */ + public static boolean shouldExpireSnapshot(Table table) { Review comment: In my eyes, this doesn't exactly fit into a "SmallFileUtil". The snapshot expiration is time based, not size based. Does it make sense to move it elsewhere? Possibly there's similar code within the Spark actions for maintaining snapshots that can be leveraged or shared. 
########## File path: core/src/main/java/org/apache/iceberg/RemoveSnapshots.java ########## @@ -359,10 +361,24 @@ private void removeExpiredFiles(List<Snapshot> snapshots, Set<Long> validIds, Se } } }); - deleteDataFiles(manifestsToScan, manifestsToRevert, validIds); - deleteMetadataFiles(manifestsToDelete, manifestListsToDelete); + if (cleanExpiredFiles) { + deleteDataFiles(manifestsToScan, manifestsToRevert, validIds); + deleteMetadataFiles(manifestsToDelete, manifestListsToDelete); + } else { + Set<String> filesToDelete = findFilesToDelete(manifestsToScan, manifestsToRevert, validIds); + expireSnapshotResult = ExpireSnapshotResult + .builder() + .addManifestFiles(manifestsToDelete) + .addManifestListFiles(manifestListsToDelete) + .addDataFiles(filesToDelete) + .build(); + } } + public ExpireSnapshotResult getExpiredSnapshotResult() { + cleanExpiredSnapshots(); Review comment: Is there any way to avoid this side-effecting call in this function? Maybe Ryan's comment that there's a more appropriate noun is relevant as I don't typically expect a getter to have side effects. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
