blackmwk commented on code in PR #2232: URL: https://github.com/apache/iceberg-rust/pull/2232#discussion_r2963753022
########## crates/iceberg/src/catalog/utils.rs: ########## @@ -0,0 +1,119 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +//! Utility functions for catalog operations. + +use std::collections::HashSet; + +use futures::stream; + +use crate::Result; +use crate::io::FileIO; +use crate::spec::TableMetadata; + +/// Deletes all data and metadata files referenced by the given table metadata. +/// +/// This mirrors the Java implementation's `CatalogUtil.dropTableData`. +/// It collects all manifest files, manifest lists, previous metadata files, +/// statistics files, and partition statistics files, then deletes them. +/// +/// Data files within manifests are only deleted if the `gc.enabled` table +/// property is `true` (the default), to avoid corrupting other tables that +/// may share the same data files. 
+pub async fn drop_table_data( + io: &FileIO, + metadata: &TableMetadata, + metadata_location: Option<&str>, +) -> Result<()> { + let mut manifest_lists_to_delete: HashSet<String> = HashSet::new(); + let mut manifests_to_delete: HashSet<String> = HashSet::new(); + + for snapshot in metadata.snapshots() { + // Collect the manifest list location + let manifest_list_location = snapshot.manifest_list(); + if !manifest_list_location.is_empty() { + manifest_lists_to_delete.insert(manifest_list_location.to_string()); + } + + // Load all manifests from this snapshot + let manifest_list = snapshot.load_manifest_list(io, metadata).await?; + for manifest_file in manifest_list.entries() { + manifests_to_delete.insert(manifest_file.manifest_path.clone()); + } + } + + // Delete data files only if gc.enabled is true, to avoid corrupting shared tables + if metadata.table_properties()?.gc_enabled { + delete_data_files(io, &manifests_to_delete).await?; + } + + // Delete manifest files + io.delete_stream(stream::iter(manifests_to_delete)).await?; + + // Delete manifest lists + io.delete_stream(stream::iter(manifest_lists_to_delete)) + .await?; + + // Delete previous metadata files + let prev_metadata_paths: Vec<String> = metadata + .metadata_log() + .iter() + .map(|m| m.metadata_file.clone()) + .collect(); + io.delete_stream(stream::iter(prev_metadata_paths)).await?; + + // Delete statistics files + let stats_paths: Vec<String> = metadata + .statistics_iter() + .map(|s| s.statistics_path.clone()) + .collect(); + io.delete_stream(stream::iter(stats_paths)).await?; + + // Delete partition statistics files + let partition_stats_paths: Vec<String> = metadata + .partition_statistics_iter() + .map(|s| s.statistics_path.clone()) + .collect(); + io.delete_stream(stream::iter(partition_stats_paths)) + .await?; + + // Delete the current metadata file + if let Some(location) = metadata_location { + io.delete(location).await?; + } + + Ok(()) +} + +/// Reads each manifest and deletes the data 
files referenced within. +async fn delete_data_files(io: &FileIO, manifest_paths: &HashSet<String>) -> Result<()> { Review Comment: This may be quite slow; we may need to parse the manifests concurrently. ########## crates/iceberg/src/catalog/mod.rs: ########## @@ -98,6 +99,22 @@ pub trait Catalog: Debug + Sync + Send { /// Drop a table from the catalog, or returns error if it doesn't exist. async fn drop_table(&self, table: &TableIdent) -> Result<()>; + /// Drop a table from the catalog and delete the underlying table data. + /// + /// The default implementation loads the table metadata, drops the table + /// from the catalog, then deletes all associated data and metadata files + /// using [`drop_table_data`](utils::drop_table_data). + async fn purge_table(&self, table: &TableIdent) -> Result<()> { Review Comment: I'm hesitant to add a default method here, which may surprise developers who want to develop a new catalog. I think having `drop_table_data` as a utility method is good enough, since we already abstract out the most important shared logic. ########## crates/catalog/s3tables/src/catalog.rs: ########## @@ -581,6 +581,14 @@ impl Catalog for S3TablesCatalog { Ok(()) } + /// Purge a table from the S3 Tables catalog. + /// + /// S3 Tables data is managed by the service, so this just delegates + /// to `drop_table` + async fn purge_table(&self, table: &TableIdent) -> Result<()> { + self.drop_table(table).await Review Comment: I think we should return an error here. If S3 Tables doesn't support purge, we should report it rather than fail silently. -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
To unsubscribe, e-mail: [email protected] For queries about this service, please contact Infrastructure at: [email protected] --------------------------------------------------------------------- To unsubscribe, e-mail: [email protected] For additional commands, e-mail: [email protected]
