amogh-jahagirdar commented on code in PR #5669:
URL: https://github.com/apache/iceberg/pull/5669#discussion_r985139094
##########
core/src/main/java/org/apache/iceberg/ReachableFileCleanup.java:
##########
@@ -0,0 +1,181 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+package org.apache.iceberg;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.ExecutorService;
+import java.util.function.Consumer;
+import java.util.stream.Collectors;
+import org.apache.iceberg.exceptions.NotFoundException;
+import org.apache.iceberg.exceptions.RuntimeIOException;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.relocated.com.google.common.base.Joiner;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.MapDifference;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Sets;
+import org.apache.iceberg.util.Tasks;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * File cleanup strategy for snapshot expiration which determines, via an in-memory reference set,
+ * metadata and data files that are not reachable given the previous and current table states.
+ */
+class ReachableFileCleanup extends FileCleanupStrategy {
+
+  private static final Logger LOG = LoggerFactory.getLogger(IncrementalFileCleanup.class);
+
+  private final TableMetadata afterExpiration;
+  private final TableMetadata beforeExpiration;
+
+  ReachableFileCleanup(
+      TableOperations ops,
+      TableMetadata beforeExpiration,
+      ExecutorService deleteExecutorService,
+      ExecutorService planExecutorService,
+      Consumer<String> deleteFunc) {
+    super(ops, deleteExecutorService, planExecutorService, deleteFunc);
+    this.beforeExpiration = beforeExpiration;
+    this.afterExpiration = ops.refresh();
+  }
+
+  @Override
+  public void cleanFiles() {
+    // Identify all of the manifest lists to retain
+    Set<String> manifestListsBeforeExpiration =
+        ReachableFileUtil.manifestListLocations(beforeExpiration);
+    Map<String, ManifestFile> manifestsBeforeExpiration =
+        computeManifestsForManifestLists(manifestListsBeforeExpiration);
+    // Identify all of the manifest lists that currently exist
+    Set<String> manifestListsAfterExpiration =
+        ReachableFileUtil.manifestListLocations(afterExpiration);
+    Map<String, ManifestFile> manifestsAfterExpiration =
+        computeManifestsForManifestLists(manifestListsAfterExpiration);
+
+    // The manifest files which we could delete are just the ones which existed before expiration
+    // which no longer exist
+    Set<String> manifestListsToDelete =
+        Sets.difference(manifestListsBeforeExpiration, manifestListsAfterExpiration);
+
+    MapDifference<String, ManifestFile> diff =
+        Maps.difference(manifestsBeforeExpiration, manifestsAfterExpiration);
+
+    List<ManifestFile> manifestsToDelete = Lists.newArrayList(diff.entriesOnlyOnLeft().values());

Review Comment:
   Yes, I'm planning to change it for exactly this reason; I already applied this principle to data files further down (i.e., build a set of candidate files for deletion from the delta between the before and after states, then use the latest state as the source of truth and drop any candidates that are still reachable, since those are no longer viable for deletion). I believe we can apply the same principle to manifests, which should in theory further optimize the procedure when many snapshots are being expired and a large number of manifests become unreachable. Although in practice I think people would use the Spark procedure for such cases anyway? Regardless, will do this @singhpk234!
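   To make the two-step idea concrete, here is a minimal, self-contained sketch of the pruning approach described in the comment above. The class, method, and parameter names here are hypothetical stand-ins; the actual cleanup works on ManifestFile objects read from the table's manifest lists rather than on plain path sets.

   import java.util.HashSet;
   import java.util.Set;

   // Sketch only: collect candidates from the manifest lists being removed, then prune against
   // the manifests still referenced by retained manifest lists (the latest state is the source
   // of truth for reachability).
   class CandidatePruningSketch {

     static Set<String> manifestsToDelete(
         Set<String> manifestsFromExpiredManifestLists,
         Set<String> manifestsFromRetainedManifestLists) {
       // Step 1: every manifest referenced by a removed manifest list is a deletion candidate
       Set<String> candidates = new HashSet<>(manifestsFromExpiredManifestLists);
       // Step 2: drop any candidate that a retained manifest list still references
       candidates.removeAll(manifestsFromRetainedManifestLists);
       return candidates;
     }
   }

   The same shape applies to the data file case mentioned above: collect candidates from the manifests being deleted, then remove anything still referenced by the manifests that survive expiration.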
