CTTY commented on code in PR #2232: URL: https://github.com/apache/iceberg-rust/pull/2232#discussion_r2927659961
########## crates/iceberg/src/catalog/utils.rs: ########## @@ -0,0 +1,151 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//! Utility functions for catalog operations. + +use std::collections::HashSet; + +use crate::io::FileIO; +use crate::spec::TableMetadata; +use crate::Result; + +/// Property key for enabling garbage collection on drop. +/// When set to `false`, data files will not be deleted when a table is dropped. +/// Defaults to `true`. +pub const GC_ENABLED: &str = "gc.enabled"; +const GC_ENABLED_DEFAULT: bool = true; Review Comment: Should be moved to TableProperty ########## crates/iceberg/src/catalog/utils.rs: ########## @@ -0,0 +1,151 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//! Utility functions for catalog operations. + +use std::collections::HashSet; + +use crate::io::FileIO; +use crate::spec::TableMetadata; +use crate::Result; + +/// Property key for enabling garbage collection on drop. +/// When set to `false`, data files will not be deleted when a table is dropped. +/// Defaults to `true`. +pub const GC_ENABLED: &str = "gc.enabled"; +const GC_ENABLED_DEFAULT: bool = true; + +/// Deletes all data and metadata files referenced by the given table metadata. +/// +/// This mirrors the Java implementation's `CatalogUtil.dropTableData`. +/// It collects all manifest files, manifest lists, previous metadata files, +/// statistics files, and partition statistics files, then deletes them. +/// +/// Data files within manifests are only deleted if the `gc.enabled` table +/// property is `true` (the default), to avoid corrupting other tables that +/// may share the same data files. +/// +/// Individual file deletion failures are suppressed to complete as much +/// cleanup as possible, matching the Java behavior. 
+pub async fn drop_table_data( + io: &FileIO, + metadata: &TableMetadata, + metadata_location: Option<&str>, +) -> Result<()> { + let mut manifest_lists_to_delete: HashSet<String> = HashSet::new(); + let mut manifests_to_delete: HashSet<String> = HashSet::new(); + + for snapshot in metadata.snapshots() { + // Collect the manifest list location + let manifest_list_location = snapshot.manifest_list(); + if !manifest_list_location.is_empty() { + manifest_lists_to_delete.insert(manifest_list_location.to_string()); + } + + // Load all manifests from this snapshot + match snapshot.load_manifest_list(io, metadata).await { + Ok(manifest_list) => { + for manifest_file in manifest_list.entries() { + manifests_to_delete.insert(manifest_file.manifest_path.clone()); + } + } + Err(_) => { + // Suppress failure to continue cleanup + } + } + } + + let gc_enabled = metadata + .properties() + .get(GC_ENABLED) + .and_then(|v| v.parse::<bool>().ok()) + .unwrap_or(GC_ENABLED_DEFAULT); + + // Delete data files only if gc.enabled is true, to avoid corrupting shared tables + if gc_enabled { + delete_data_files(io, &manifests_to_delete).await; + } + + // Delete manifest files + delete_files(io, manifests_to_delete.iter().map(String::as_str)).await; + + // Delete manifest lists + delete_files(io, manifest_lists_to_delete.iter().map(String::as_str)).await; + + // Delete previous metadata files + delete_files( + io, + metadata.metadata_log().iter().map(|m| m.metadata_file.as_str()), + ) + .await; + + // Delete statistics files + delete_files( + io, + metadata + .statistics_iter() + .map(|s| s.statistics_path.as_str()), + ) + .await; + + // Delete partition statistics files + delete_files( + io, + metadata + .partition_statistics_iter() + .map(|s| s.statistics_path.as_str()), + ) + .await; + + // Delete the current metadata file + if let Some(location) = metadata_location { + let _ = io.delete(location).await; + } + + Ok(()) +} + +/// Reads each manifest and deletes the data files referenced 
within. +async fn delete_data_files(io: &FileIO, manifest_paths: &HashSet<String>) { + for manifest_path in manifest_paths { + let input = match io.new_input(manifest_path) { + Ok(input) => input, + Err(_) => continue, + }; + + let manifest_content = match input.read().await { + Ok(content) => content, + Err(_) => continue, + }; + + let manifest = match crate::spec::Manifest::parse_avro(&manifest_content) { + Ok(manifest) => manifest, + Err(_) => continue, + }; + + for entry in manifest.entries() { + let _ = io.delete(entry.data_file.file_path()).await; + } + } +} + +/// Deletes a collection of files, suppressing individual failures. +async fn delete_files<'a>(io: &FileIO, paths: impl Iterator<Item = &'a str>) { Review Comment: Should use the delete_stream API we are about to add ########## crates/catalog/glue/src/catalog.rs: ########## @@ -642,7 +642,7 @@ impl Catalog for GlueCatalog { /// attempting to drop the table. This includes scenarios where /// the table does not exist. /// - Any network or communication error occurs with the database backend. - async fn drop_table(&self, table: &TableIdent) -> Result<()> { + async fn drop_table_with_purge(&self, table: &TableIdent, _purge: bool) -> Result<()> { Review Comment: Glue should use purge ########## crates/catalog/hms/src/catalog.rs: ########## @@ -580,7 +580,7 @@ impl Catalog for HmsCatalog { /// attempting to drop the table. This includes scenarios where /// the table does not exist. /// - Any network or communication error occurs with the database backend. 
- async fn drop_table(&self, table: &TableIdent) -> Result<()> { + async fn drop_table_with_purge(&self, table: &TableIdent, _purge: bool) -> Result<()> { Review Comment: HMS should use purge; looking at Java's impl: https://github.com/apache/iceberg/blob/8c2ca1d084fca37671ba8b38d59ea3f5a187b147/hive-metastore/src/main/java/org/apache/iceberg/hive/HiveCatalog.java#L244-L251 It looks like we intend to skip Hive's purge but use FileIO to purge the table ########## crates/iceberg/src/catalog/memory/catalog.rs: ########## @@ -319,7 +319,7 @@ impl Catalog for MemoryCatalog { } /// Drop a table from the catalog. - async fn drop_table(&self, table_ident: &TableIdent) -> Result<()> { + async fn drop_table_with_purge(&self, table_ident: &TableIdent, _purge: bool) -> Result<()> { Review Comment: Memory catalog should use purge ########## crates/catalog/sql/src/catalog.rs: ########## @@ -729,7 +729,7 @@ impl Catalog for SqlCatalog { } } - async fn drop_table(&self, identifier: &TableIdent) -> Result<()> { + async fn drop_table_with_purge(&self, identifier: &TableIdent, _purge: bool) -> Result<()> { Review Comment: I'm actually not sure if SqlCatalog should support this -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
