RussellSpitzer commented on a change in pull request #2501:
URL: https://github.com/apache/iceberg/pull/2501#discussion_r617842650
##########
File path: api/src/main/java/org/apache/iceberg/actions/CompactDataFiles.java
##########

@@ -0,0 +1,140 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.apache.iceberg.actions;
+
+import java.util.Map;
+import org.apache.iceberg.actions.compaction.BinPack;
+import org.apache.iceberg.expressions.Expression;
+
+public interface CompactDataFiles extends Action<CompactDataFiles, CompactDataFiles.Result> {
+
+  /**
+   * Enable committing groups of chunks prior to the entire compaction completing. This will produce additional commits
+   * but allow for progress even if some chunks fail to commit. The default is false, which produces a single commit
+   * when all chunks have completed.
+   */
+  String PARTIAL_PROGRESS_ENABLED = "partial_progress.enabled";
+  boolean PARTIAL_PROGRESS_ENABLED_DEFAULT = false;
+
+  /**
+   * The maximum number of commits that compaction is allowed to produce if partial progress is enabled.
+   */
+  String PARTIAL_PROGRESS_MAX_COMMITS = "partial_progress.max_commits";
+  int PARTIAL_PROGRESS_MAX_COMMITS_DEFAULT = 10;
+
+  String COMPACTION_STRATEGY_DEFAULT = BinPack.NAME;
+
+  /**
+   * The largest amount of data that should be compacted in a single chunk by the underlying framework. This bounds the
+   * amount of data that would be used in a single shuffle, for example.
+   */
+  String MAX_CHUNK_SIZE_BYTES = "max_chunk_size_bytes";
+  long MAX_CHUNK_SIZE_DEFAULT = 1024L * 1024L * 1024L * 100L; // 100 Gigabytes
+
+  /**
+   * The file size that this compaction strategy will attempt to generate when rewriting files.
+   */
+  String TARGET_FILE_SIZE = "target_file_size";
+
+  /**
+   * A threshold for preventing compaction of partitions whose output would not have more than
+   * MIN_COMPACTION_OUTPUT_FILES files.
+   */
+  String MIN_COMPACTION_OUTPUT_FILES = "min_output_files";
+  int MIN_COMPACTION_OUTPUT_FILES_DEFAULT = 1;
+
+  /**
+   * A threshold for preventing compaction of partitions which do not have more than MIN_COMPACTION_INPUT_FILES to
+   * compact.
+   */
+  String MIN_COMPACTION_INPUT_FILES = "min_input_files";
+  int MIN_COMPACTION_INPUT_FILES_DEFAULT = 1;

Review comment:
   I feel like this will just be a real trap, kind of like our default expire snapshots 'older-than' value, which trips up every first user who starts by just seeing if it does anything, and it doesn't. I feel like we are always answering questions about that. I don't know if there is a best-in-most-cases value here, but I still think 1 is the least surprising default.
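To make the concern above concrete, here is a minimal sketch of how a strategy might apply the min_input_files threshold. The class, the shouldCompact helper, and the file names are hypothetical and are not part of the proposed API; the sketch only shows why a default of 1 never silently skips a small table, while a larger default would.

    import java.util.List;

    // Hypothetical illustration only -- not part of the proposed API. It shows why a
    // min_input_files default of 1 is the least surprising choice: a group qualifies
    // for compaction as soon as it has more than minInputFiles files.
    public class MinInputFilesSketch {

      // Mirrors the javadoc wording "do not have more than MIN_COMPACTION_INPUT_FILES",
      // i.e. a strictly-greater-than check.
      static boolean shouldCompact(List<String> inputFiles, int minInputFiles) {
        return inputFiles.size() > minInputFiles;
      }

      public static void main(String[] args) {
        List<String> twoSmallFiles = List.of("part-0001.parquet", "part-0002.parquet");

        // Default of 1: two small files already qualify, so a first-time user sees
        // the action rewrite something.
        System.out.println(shouldCompact(twoSmallFiles, 1)); // true

        // A larger default, for example 5, silently skips the same table -- the
        // "trap" the comment compares to the expire-snapshots 'older-than' default.
        System.out.println(shouldCompact(twoSmallFiles, 5)); // false
      }
    }

With a default of 1 the only groups that are skipped are single-file groups, where a rewrite would be a no-op anyway.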
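Separately, for context on how the option keys declared above might be consumed, here is a hedged usage sketch. It assumes the base Action interface offers the fluent option(String, String) and execute() methods used elsewhere in Iceberg's actions API; how a CompactDataFiles instance is obtained (an engine-specific entry point) is deliberately left out since this PR only proposes the interface, and the class and method names are hypothetical.

    import org.apache.iceberg.actions.CompactDataFiles;

    // Hedged sketch: configures the proposed action through its option keys and runs it.
    // The caller is handed a CompactDataFiles instance by some engine-specific factory.
    class CompactDataFilesUsageSketch {
      static CompactDataFiles.Result compactWithPartialProgress(CompactDataFiles action) {
        return action
            // commit chunks as they finish instead of a single commit at the end
            .option(CompactDataFiles.PARTIAL_PROGRESS_ENABLED, "true")
            // cap how many commits partial progress may create
            .option(CompactDataFiles.PARTIAL_PROGRESS_MAX_COMMITS, "20")
            // bound the amount of data handled in one chunk (e.g. one shuffle)
            .option(CompactDataFiles.MAX_CHUNK_SIZE_BYTES, String.valueOf(10L * 1024 * 1024 * 1024))
            .execute();
      }
    }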
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the URL above to go to the specific comment.
