rdblue commented on a change in pull request #3213:
URL: https://github.com/apache/iceberg/pull/3213#discussion_r725242550
##########
File path: flink/src/main/java/org/apache/iceberg/flink/sink/compact/CompactFileGenerator.java
##########
@@ -0,0 +1,243 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.flink.sink.compact;
+
+import java.io.IOException;
+import java.util.Collection;
+import java.util.List;
+import java.util.Map;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.stream.Collectors;
+import org.apache.flink.runtime.state.StateInitializationContext;
+import org.apache.flink.streaming.api.operators.AbstractStreamOperator;
+import org.apache.flink.streaming.api.operators.BoundedOneInput;
+import org.apache.flink.streaming.api.operators.OneInputStreamOperator;
+import org.apache.flink.streaming.runtime.streamrecord.StreamRecord;
+import org.apache.iceberg.CombinedScanTask;
+import org.apache.iceberg.FileScanTask;
+import org.apache.iceberg.PartitionSpec;
+import org.apache.iceberg.Table;
+import org.apache.iceberg.TableProperties;
+import org.apache.iceberg.expressions.Expression;
+import org.apache.iceberg.expressions.Expressions;
+import org.apache.iceberg.flink.TableLoader;
+import org.apache.iceberg.flink.sink.compact.SmallFilesMessage.CommonControllerMessage;
+import org.apache.iceberg.flink.sink.compact.SmallFilesMessage.CompactionUnit;
+import org.apache.iceberg.flink.sink.compact.SmallFilesMessage.EndCheckpoint;
+import org.apache.iceberg.flink.sink.compact.SmallFilesMessage.EndCompaction;
+import org.apache.iceberg.io.CloseableIterable;
+import org.apache.iceberg.io.CloseableIterator;
+import org.apache.iceberg.relocated.com.google.common.base.Joiner;
+import org.apache.iceberg.relocated.com.google.common.collect.ListMultimap;
+import org.apache.iceberg.relocated.com.google.common.collect.Lists;
+import org.apache.iceberg.relocated.com.google.common.collect.Maps;
+import org.apache.iceberg.relocated.com.google.common.collect.Multimaps;
+import org.apache.iceberg.relocated.com.google.common.collect.Streams;
+import org.apache.iceberg.util.PropertyUtil;
+import org.apache.iceberg.util.SmallFileUtil;
+import org.apache.iceberg.util.StructLikeWrapper;
+import org.apache.iceberg.util.TableScanUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import static java.util.Collections.emptyList;
+
+public class CompactFileGenerator extends AbstractStreamOperator<CommonControllerMessage>
+    implements OneInputStreamOperator<EndCheckpoint, CommonControllerMessage>, BoundedOneInput {
+  private static final Logger LOG = LoggerFactory.getLogger(CompactFileGenerator.class);
+
+  private long startingSnapshotId;
+  private final TableLoader tableLoader;
+  private transient Table table;
+  private boolean caseSensitive;
+  private PartitionSpec spec;
+  private Expression filter;
+  private long targetSizeInBytes;
+  private int splitLookback;
+  private long splitOpenFileCost;
+
+  public CompactFileGenerator(TableLoader tableLoader) {
+    this.tableLoader = tableLoader;
+  }
+
+  @Override
+  public void initializeState(StateInitializationContext context) throws Exception {
+    super.initializeState(context);
+    // Open the table loader and load the table.
+    this.tableLoader.open();
+    this.table = tableLoader.loadTable();
+
+    this.spec = table.spec();
+    this.filter = Expressions.alwaysTrue();
+    this.caseSensitive = false;
+
+    long splitSize = PropertyUtil.propertyAsLong(
+        table.properties(),
+        TableProperties.SPLIT_SIZE,
+        TableProperties.SPLIT_SIZE_DEFAULT);
+    long targetFileSize = PropertyUtil.propertyAsLong(
+        table.properties(),
+        TableProperties.WRITE_TARGET_FILE_SIZE_BYTES,
+        TableProperties.WRITE_TARGET_FILE_SIZE_BYTES_DEFAULT);
+    long targetMergeFileSize = PropertyUtil.propertyAsLong(
+        table.properties(),
+        TableProperties.WRITE_COMPACT_TARGET_FILE_SIZE_BYTES,
+        targetFileSize);
+    this.targetSizeInBytes = Math.min(splitSize, targetMergeFileSize);
+    this.splitLookback = PropertyUtil.propertyAsInt(
+        table.properties(),
+        TableProperties.SPLIT_LOOKBACK,
+        TableProperties.SPLIT_LOOKBACK_DEFAULT);
+    this.splitOpenFileCost = PropertyUtil.propertyAsLong(
+        table.properties(),
+        TableProperties.SPLIT_OPEN_FILE_COST,
+        TableProperties.SPLIT_OPEN_FILE_COST_DEFAULT);
+  }
+
+  @Override
+  public void processElement(StreamRecord<EndCheckpoint> element) throws Exception {
+    EndCheckpoint endCheckpoint = element.getValue();
+    LOG.info("Received an EndCheckpoint {}, begin to compute CompactionUnit", endCheckpoint.getCheckpointId());
+    emit(new EndCheckpoint(
+        endCheckpoint.getCheckpointId(),
+        getRuntimeContext().getIndexOfThisSubtask(),
+        getRuntimeContext().getIndexOfThisSubtask())
+    );
+
+    if (SmallFileUtil.shouldMergeSmallFiles(table)) {
+      table.refresh();
+      AtomicInteger index = new AtomicInteger();
+      List<CompactionUnit> tasks = getCombinedScanTasks()
+          .stream()
+          .map(combineTask -> new CompactionUnit(combineTask, index.getAndIncrement()))
+          .collect(Collectors.toList());
+
+      if (!tasks.isEmpty()) {
+        tasks.forEach(task -> {
+          emit(task);
+          LOG.info("Emit CompactionUnit(id: {}, files: {}, total: {}, checkpoint: {})",
+              task.getUnitId(),
+              Joiner.on(", ").join(task.getCombinedScanTask().files().stream()
+                  .map(t -> t.file().content() + "=>" + t.file().fileSizeInBytes() / 1024 / 1024 + "M")
+                  .collect(Collectors.toList())),
+              task.getCombinedScanTask().files().size(),
+              endCheckpoint.getCheckpointId());
+        });
+        LOG.info("Summary: (checkpoint: {}, total CompactionUnit: {})", endCheckpoint.getCheckpointId(), tasks.size());
+
+        // broadcast emit checkpoint barrier
+        emit(new EndCompaction(endCheckpoint.getCheckpointId(), startingSnapshotId));
+      } else {
+        LOG.warn("CombinedScanTasks is empty, no files need to be compacted");
+      }
+    }
+  }
+
+  private List<CombinedScanTask> getCombinedScanTasks() {
+    CloseableIterable<FileScanTask> fileScanTasks = null;
+    if (table.currentSnapshot() == null) {
+      return emptyList();
+    }
+
+    long startingSnapshot = table.currentSnapshot().snapshotId();
+    try {
+      fileScanTasks = table.newScan()
+          .useSnapshot(startingSnapshot)
+          .caseSensitive(caseSensitive)
+          .ignoreResiduals()
+          .filter(filter)
+          .planFiles();

Review comment:
I think the misunderstanding is that this should not just compact within a checkpoint, but across checkpoints. Say you have writers w1 and w2 producing files for partitions p1 and p2 in checkpoints c1, c2, etc.
You end up writing `p1/w1-c1`, `p2/w1-c1`, `p1/w2-c1`, `p2/w2-c1`, `p1/w1-c2`, `p2/w1-c2`, `p1/w2-c2`, `p2/w2-c2`, `p1/w1-c3`, `p2/w1-c3`, `p1/w2-c3`, `p2/w2-c3`, and so on.

As those files are committed, they get sent to compactor tasks keyed by partition. So one task gets `p1/w1-c1`, `p1/w2-c1`, `p1/w1-c2`, `p1/w2-c2`, ... and another task gets `p2/w1-c1`, `p2/w2-c1`, `p2/w1-c2`, `p2/w2-c2`, ... Those can be compacted together.

The main benefit of compaction comes from compacting across checkpoints. As files arrive, the compactor task checks whether a file should be added to the currently open file or whether it should start a new one. When it starts a new file because some size limit has been reached, it emits the set of files it compacted so they can be swapped for the compacted result. For example, (`p1/w1-c1`, `p1/w2-c1`, `p1/w1-c2`) is swapped for `p1/c1-c2-compacted`. A sketch of this accumulate-and-roll loop follows the list below.

Note that:
* No table scanning is required
* Only files produced by this Flink job will be compacted, so parallel jobs don't conflict with one another
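A rough sketch of that accumulate-and-roll loop, in plain Java to keep it self-contained — `PartitionCompactor`, `FileEvent`, and the `swap` callback are hypothetical names used for illustration, not types from this PR; in the real operator the open group would live in Flink keyed state so it survives failure and restart:

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Consumer;

// Hypothetical sketch of the per-partition compactor logic described above.
class PartitionCompactor {

  // A committed data file as seen by the compactor task (fields are assumed).
  static final class FileEvent {
    final String partition;   // e.g. "p1"
    final String path;        // e.g. "p1/w1-c1"
    final long sizeInBytes;

    FileEvent(String partition, String path, long sizeInBytes) {
      this.partition = partition;
      this.path = path;
      this.sizeInBytes = sizeInBytes;
    }
  }

  private final long targetSizeInBytes;
  // Callback that rewrites a group of small files into one compacted file and swaps it in.
  private final Consumer<List<FileEvent>> swap;

  // Files accumulated per partition, carried across checkpoints.
  private final Map<String, List<FileEvent>> openGroup = new HashMap<>();
  private final Map<String, Long> openBytes = new HashMap<>();

  PartitionCompactor(long targetSizeInBytes, Consumer<List<FileEvent>> swap) {
    this.targetSizeInBytes = targetSizeInBytes;
    this.swap = swap;
  }

  // Called once per committed file; all files for one partition arrive at the same task.
  void onFile(FileEvent file) {
    long accumulated = openBytes.getOrDefault(file.partition, 0L);
    if (accumulated > 0 && accumulated + file.sizeInBytes > targetSizeInBytes) {
      // The size limit is reached: emit the open group to be compacted and swapped,
      // e.g. (p1/w1-c1, p1/w2-c1, p1/w1-c2) -> p1/c1-c2-compacted.
      swap.accept(openGroup.remove(file.partition));
      openBytes.put(file.partition, 0L);
    }
    openGroup.computeIfAbsent(file.partition, p -> new ArrayList<>()).add(file);
    openBytes.merge(file.partition, file.sizeInBytes, Long::sum);
  }
}
```

Because the task only ever sees files committed by this job's own writers, both properties in the list above fall out directly: no scan is needed to find candidate files, and concurrent jobs never pick up each other's output.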
