This is an automated email from the ASF dual-hosted git repository.

xuanwo pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/iceberg-rust.git


The following commit(s) were added to refs/heads/main by this push:
     new d78f6567c chore: bump MSRV to 1.88, fix warnings and clippy errors 
(#1902)
d78f6567c is described below

commit d78f6567cabe917d75ae9884144a5a0f70e08ace
Author: Matt Butrovich <[email protected]>
AuthorDate: Tue Dec 9 09:57:36 2025 -0500

    chore: bump MSRV to 1.88, fix warnings and clippy errors (#1902)
    
    https://github.com/apache/iceberg-rust/pull/1899 requires a bump to MSRV
    1.88. This version is within the policy of this project, and since the
    README mentions `MSRV is updated when we release iceberg-rust` and we're
    preparing 0.8, here's a PR for just MSRV 1.88.
    
    ## Which issue does this PR close?
    
    <!--
    We generally require a GitHub issue to be filed for all bug fixes and
    enhancements and this helps us generate change logs for our releases.
    You can link an issue to this PR using the GitHub syntax. For example
    `Closes #123` indicates that this PR will close issue #123.
    -->
    
    N/A.
    
    ## What changes are included in this PR?
    
    <!--
    Provide a summary of the modifications in this PR. List the main changes
    such as new features, bug fixes, refactoring, or any other updates.
    -->
    
    - Bump MSRV to 1.88
    - Fix warnings
    - Fix errors found by `make check-clippy`
    - Format
    
    ## Are these changes tested?
    
    <!--
    Specify what test covers (unit test, integration test, etc.).
    
    If tests are not included in your PR, please explain why (for example,
    are they covered by existing tests)?
    -->
    
    Existing tests
---
 Cargo.toml                                         |  2 +-
 crates/catalog/glue/src/catalog.rs                 | 46 ++++++-------
 crates/iceberg/src/arrow/reader.rs                 | 68 +++++++++----------
 crates/iceberg/src/arrow/record_batch_projector.rs | 35 +++++-----
 .../iceberg/src/arrow/record_batch_transformer.rs  |  2 +-
 crates/iceberg/src/arrow/value.rs                  | 33 +++++----
 crates/iceberg/src/catalog/mod.rs                  | 14 ++--
 crates/iceberg/src/delete_vector.rs                | 10 +--
 .../src/expr/visitors/manifest_evaluator.rs        | 26 ++++----
 .../src/expr/visitors/page_index_evaluator.rs      | 16 ++---
 .../src/expr/visitors/strict_metrics_evaluator.rs  | 24 +++----
 crates/iceberg/src/inspect/metadata_table.rs       |  4 +-
 crates/iceberg/src/io/storage.rs                   |  4 +-
 crates/iceberg/src/io/storage_azdls.rs             | 20 +++---
 crates/iceberg/src/io/storage_gcs.rs               | 26 ++++----
 crates/iceberg/src/io/storage_oss.rs               |  2 +-
 crates/iceberg/src/io/storage_s3.rs                | 24 +++----
 crates/iceberg/src/metadata_columns.rs             |  5 +-
 crates/iceberg/src/scan/mod.rs                     |  6 +-
 crates/iceberg/src/spec/datatypes.rs               |  3 +-
 crates/iceberg/src/spec/manifest/writer.rs         |  8 +--
 crates/iceberg/src/spec/schema/prune_columns.rs    | 34 +++++-----
 crates/iceberg/src/spec/table_metadata.rs          | 71 ++++++++++----------
 crates/iceberg/src/spec/transform.rs               | 78 +++++++++++-----------
 crates/iceberg/src/spec/values/tests.rs            |  2 +-
 crates/iceberg/src/spec/view_metadata_builder.rs   |  8 +--
 crates/iceberg/src/transaction/mod.rs              |  2 +-
 crates/iceberg/src/transaction/snapshot.rs         | 14 ++--
 .../src/writer/file_writer/rolling_writer.rs       | 24 +++----
 crates/iceberg/tests/file_io_gcs_test.rs           |  6 +-
 .../datafusion/src/physical_plan/repartition.rs    | 11 ++-
 .../datafusion/src/physical_plan/sort.rs           |  3 +-
 crates/integrations/datafusion/src/table/mod.rs    |  2 +-
 .../tests/integration_datafusion_test.rs           | 15 ++---
 rust-toolchain.toml                                |  2 +-
 35 files changed, 315 insertions(+), 335 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 36093d92a..9904820de 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -36,7 +36,7 @@ version = "0.7.0"
 license = "Apache-2.0"
 repository = "https://github.com/apache/iceberg-rust"
 # Check the MSRV policy in README.md before changing this
-rust-version = "1.87"
+rust-version = "1.88"
 
 [workspace.dependencies]
 anyhow = "1.0.72"
diff --git a/crates/catalog/glue/src/catalog.rs 
b/crates/catalog/glue/src/catalog.rs
index dce287ed6..37a7996f8 100644
--- a/crates/catalog/glue/src/catalog.rs
+++ b/crates/catalog/glue/src/catalog.rs
@@ -151,33 +151,33 @@ impl GlueCatalog {
     async fn new(config: GlueCatalogConfig) -> Result<Self> {
         let sdk_config = create_sdk_config(&config.props, 
config.uri.as_ref()).await;
         let mut file_io_props = config.props.clone();
-        if !file_io_props.contains_key(S3_ACCESS_KEY_ID) {
-            if let Some(access_key_id) = file_io_props.get(AWS_ACCESS_KEY_ID) {
-                file_io_props.insert(S3_ACCESS_KEY_ID.to_string(), 
access_key_id.to_string());
-            }
+        if !file_io_props.contains_key(S3_ACCESS_KEY_ID)
+            && let Some(access_key_id) = file_io_props.get(AWS_ACCESS_KEY_ID)
+        {
+            file_io_props.insert(S3_ACCESS_KEY_ID.to_string(), 
access_key_id.to_string());
         }
-        if !file_io_props.contains_key(S3_SECRET_ACCESS_KEY) {
-            if let Some(secret_access_key) = 
file_io_props.get(AWS_SECRET_ACCESS_KEY) {
-                file_io_props.insert(
-                    S3_SECRET_ACCESS_KEY.to_string(),
-                    secret_access_key.to_string(),
-                );
-            }
+        if !file_io_props.contains_key(S3_SECRET_ACCESS_KEY)
+            && let Some(secret_access_key) = 
file_io_props.get(AWS_SECRET_ACCESS_KEY)
+        {
+            file_io_props.insert(
+                S3_SECRET_ACCESS_KEY.to_string(),
+                secret_access_key.to_string(),
+            );
         }
-        if !file_io_props.contains_key(S3_REGION) {
-            if let Some(region) = file_io_props.get(AWS_REGION_NAME) {
-                file_io_props.insert(S3_REGION.to_string(), 
region.to_string());
-            }
+        if !file_io_props.contains_key(S3_REGION)
+            && let Some(region) = file_io_props.get(AWS_REGION_NAME)
+        {
+            file_io_props.insert(S3_REGION.to_string(), region.to_string());
         }
-        if !file_io_props.contains_key(S3_SESSION_TOKEN) {
-            if let Some(session_token) = file_io_props.get(AWS_SESSION_TOKEN) {
-                file_io_props.insert(S3_SESSION_TOKEN.to_string(), 
session_token.to_string());
-            }
+        if !file_io_props.contains_key(S3_SESSION_TOKEN)
+            && let Some(session_token) = file_io_props.get(AWS_SESSION_TOKEN)
+        {
+            file_io_props.insert(S3_SESSION_TOKEN.to_string(), 
session_token.to_string());
         }
-        if !file_io_props.contains_key(S3_ENDPOINT) {
-            if let Some(aws_endpoint) = config.uri.as_ref() {
-                file_io_props.insert(S3_ENDPOINT.to_string(), 
aws_endpoint.to_string());
-            }
+        if !file_io_props.contains_key(S3_ENDPOINT)
+            && let Some(aws_endpoint) = config.uri.as_ref()
+        {
+            file_io_props.insert(S3_ENDPOINT.to_string(), 
aws_endpoint.to_string());
         }
 
         let client = aws_sdk_glue::Client::new(&sdk_config);
diff --git a/crates/iceberg/src/arrow/reader.rs 
b/crates/iceberg/src/arrow/reader.rs
index de8a1420e..380d48530 100644
--- a/crates/iceberg/src/arrow/reader.rs
+++ b/crates/iceberg/src/arrow/reader.rs
@@ -504,10 +504,10 @@ impl ArrowReader {
                     // we need to call next() to update the cache with the 
newly positioned value.
                     delete_vector_iter.advance_to(next_row_group_base_idx);
                     // Only update the cache if the cached value is stale (in 
the skipped range)
-                    if let Some(cached_idx) = next_deleted_row_idx_opt {
-                        if cached_idx < next_row_group_base_idx {
-                            next_deleted_row_idx_opt = 
delete_vector_iter.next();
-                        }
+                    if let Some(cached_idx) = next_deleted_row_idx_opt
+                        && cached_idx < next_row_group_base_idx
+                    {
+                        next_deleted_row_idx_opt = delete_vector_iter.next();
                     }
 
                     // still increment the current page base index but then 
skip to the next row group
@@ -861,10 +861,10 @@ impl ArrowReader {
         };
 
         // If all row groups were filtered out, return an empty RowSelection 
(select no rows)
-        if let Some(selected_row_groups) = selected_row_groups {
-            if selected_row_groups.is_empty() {
-                return Ok(RowSelection::from(Vec::new()));
-            }
+        if let Some(selected_row_groups) = selected_row_groups
+            && selected_row_groups.is_empty()
+        {
+            return Ok(RowSelection::from(Vec::new()));
         }
 
         let mut selected_row_groups_idx = 0;
@@ -897,10 +897,10 @@ impl ArrowReader {
 
             results.push(selections_for_page);
 
-            if let Some(selected_row_groups) = selected_row_groups {
-                if selected_row_groups_idx == selected_row_groups.len() {
-                    break;
-                }
+            if let Some(selected_row_groups) = selected_row_groups
+                && selected_row_groups_idx == selected_row_groups.len()
+            {
+                break;
             }
         }
 
@@ -1031,13 +1031,13 @@ fn apply_name_mapping_to_arrow_schema(
 
             let mut metadata = field.metadata().clone();
 
-            if let Some(mapped_field) = mapped_field_opt {
-                if let Some(field_id) = mapped_field.field_id() {
-                    // Field found in mapping with a field_id → assign it
-                    metadata.insert(PARQUET_FIELD_ID_META_KEY.to_string(), 
field_id.to_string());
-                }
-                // If field_id is None, leave the field without an ID (will be 
filtered by projection)
+            if let Some(mapped_field) = mapped_field_opt
+                && let Some(field_id) = mapped_field.field_id()
+            {
+                // Field found in mapping with a field_id → assign it
+                metadata.insert(PARQUET_FIELD_ID_META_KEY.to_string(), 
field_id.to_string());
             }
+            // If field_id is None, leave the field without an ID (will be 
filtered by projection)
             // If field not found in mapping, leave it without an ID (will be 
filtered by projection)
 
             Field::new(field.name(), field.data_type().clone(), 
field.is_nullable())
@@ -2731,15 +2731,14 @@ message schema {
         // Step 4: Verify we got 199 rows (not 200)
         let total_rows: usize = result.iter().map(|b| b.num_rows()).sum();
 
-        println!("Total rows read: {}", total_rows);
+        println!("Total rows read: {total_rows}");
         println!("Expected: 199 rows (deleted row 199 which had id=200)");
 
         // This assertion will FAIL before the fix and PASS after the fix
         assert_eq!(
             total_rows, 199,
-            "Expected 199 rows after deleting row 199, but got {} rows. \
-             The bug causes position deletes in later row groups to be 
ignored.",
-            total_rows
+            "Expected 199 rows after deleting row 199, but got {total_rows} 
rows. \
+             The bug causes position deletes in later row groups to be 
ignored."
         );
 
         // Verify the deleted row (id=200) is not present
@@ -2950,16 +2949,15 @@ message schema {
         // Row group 1 has 100 rows (ids 101-200), minus 1 delete (id=200) = 
99 rows
         let total_rows: usize = result.iter().map(|b| b.num_rows()).sum();
 
-        println!("Total rows read from row group 1: {}", total_rows);
+        println!("Total rows read from row group 1: {total_rows}");
         println!("Expected: 99 rows (row group 1 has 100 rows, 1 delete at 
position 199)");
 
         // This assertion will FAIL before the fix and PASS after the fix
         assert_eq!(
             total_rows, 99,
-            "Expected 99 rows from row group 1 after deleting position 199, 
but got {} rows. \
+            "Expected 99 rows from row group 1 after deleting position 199, 
but got {total_rows} rows. \
              The bug causes position deletes to be lost when advance_to() is 
followed by next() \
-             when skipping unselected row groups.",
-            total_rows
+             when skipping unselected row groups."
         );
 
         // Verify the deleted row (id=200) is not present
@@ -3241,7 +3239,7 @@ message schema {
                 start: 0,
                 length: 0,
                 record_count: None,
-                data_file_path: format!("{}/1.parquet", table_location),
+                data_file_path: format!("{table_location}/1.parquet"),
                 data_file_format: DataFileFormat::Parquet,
                 schema: schema.clone(),
                 project_field_ids: vec![1, 2],
@@ -3338,7 +3336,7 @@ message schema {
                 start: 0,
                 length: 0,
                 record_count: None,
-                data_file_path: format!("{}/1.parquet", table_location),
+                data_file_path: format!("{table_location}/1.parquet"),
                 data_file_format: DataFileFormat::Parquet,
                 schema: schema.clone(),
                 project_field_ids: vec![1, 3],
@@ -3424,7 +3422,7 @@ message schema {
                 start: 0,
                 length: 0,
                 record_count: None,
-                data_file_path: format!("{}/1.parquet", table_location),
+                data_file_path: format!("{table_location}/1.parquet"),
                 data_file_format: DataFileFormat::Parquet,
                 schema: schema.clone(),
                 project_field_ids: vec![1, 2, 3],
@@ -3524,7 +3522,7 @@ message schema {
                 start: 0,
                 length: 0,
                 record_count: None,
-                data_file_path: format!("{}/1.parquet", table_location),
+                data_file_path: format!("{table_location}/1.parquet"),
                 data_file_format: DataFileFormat::Parquet,
                 schema: schema.clone(),
                 project_field_ids: vec![1, 2],
@@ -3565,7 +3563,7 @@ message schema {
         assert_eq!(all_values.len(), 6);
 
         for i in 0..6 {
-            assert_eq!(all_names[i], format!("name_{}", i));
+            assert_eq!(all_names[i], format!("name_{i}"));
             assert_eq!(all_values[i], i as i32);
         }
     }
@@ -3653,7 +3651,7 @@ message schema {
                 start: 0,
                 length: 0,
                 record_count: None,
-                data_file_path: format!("{}/1.parquet", table_location),
+                data_file_path: format!("{table_location}/1.parquet"),
                 data_file_format: DataFileFormat::Parquet,
                 schema: schema.clone(),
                 project_field_ids: vec![1, 2],
@@ -3749,7 +3747,7 @@ message schema {
                 start: 0,
                 length: 0,
                 record_count: None,
-                data_file_path: format!("{}/1.parquet", table_location),
+                data_file_path: format!("{table_location}/1.parquet"),
                 data_file_format: DataFileFormat::Parquet,
                 schema: schema.clone(),
                 project_field_ids: vec![1, 5, 2],
@@ -3858,7 +3856,7 @@ message schema {
                 start: 0,
                 length: 0,
                 record_count: None,
-                data_file_path: format!("{}/1.parquet", table_location),
+                data_file_path: format!("{table_location}/1.parquet"),
                 data_file_format: DataFileFormat::Parquet,
                 schema: schema.clone(),
                 project_field_ids: vec![1, 2, 3],
@@ -3997,7 +3995,7 @@ message schema {
                 start: 0,
                 length: 0,
                 record_count: None,
-                data_file_path: format!("{}/data.parquet", table_location),
+                data_file_path: format!("{table_location}/data.parquet"),
                 data_file_format: DataFileFormat::Parquet,
                 schema: schema.clone(),
                 project_field_ids: vec![1, 2],
diff --git a/crates/iceberg/src/arrow/record_batch_projector.rs 
b/crates/iceberg/src/arrow/record_batch_projector.rs
index 45de0212e..7028eee96 100644
--- a/crates/iceberg/src/arrow/record_batch_projector.rs
+++ b/crates/iceberg/src/arrow/record_batch_projector.rs
@@ -133,25 +133,24 @@ impl RecordBatchProjector {
     {
         for (pos, field) in fields.iter().enumerate() {
             let id = field_id_fetch_func(field)?;
-            if let Some(id) = id {
-                if target_field_id == id {
-                    index_vec.push(pos);
-                    return Ok(Some(field.clone()));
-                }
+            if let Some(id) = id
+                && target_field_id == id
+            {
+                index_vec.push(pos);
+                return Ok(Some(field.clone()));
             }
-            if let DataType::Struct(inner) = field.data_type() {
-                if searchable_field_func(field) {
-                    if let Some(res) = Self::fetch_field_index(
-                        inner,
-                        index_vec,
-                        target_field_id,
-                        field_id_fetch_func,
-                        searchable_field_func,
-                    )? {
-                        index_vec.push(pos);
-                        return Ok(Some(res));
-                    }
-                }
+            if let DataType::Struct(inner) = field.data_type()
+                && searchable_field_func(field)
+                && let Some(res) = Self::fetch_field_index(
+                    inner,
+                    index_vec,
+                    target_field_id,
+                    field_id_fetch_func,
+                    searchable_field_func,
+                )?
+            {
+                index_vec.push(pos);
+                return Ok(Some(res));
             }
         }
         Ok(None)
diff --git a/crates/iceberg/src/arrow/record_batch_transformer.rs 
b/crates/iceberg/src/arrow/record_batch_transformer.rs
index f30d4a09c..c4782464c 100644
--- a/crates/iceberg/src/arrow/record_batch_transformer.rs
+++ b/crates/iceberg/src/arrow/record_batch_transformer.rs
@@ -582,7 +582,7 @@ impl RecordBatchTransformer {
                 let this_field_id = field_id_str.parse().map_err(|e| {
                     Error::new(
                         ErrorKind::DataInvalid,
-                        format!("field id not parseable as an i32: {}", e),
+                        format!("field id not parseable as an i32: {e}"),
                     )
                 })?;
 
diff --git a/crates/iceberg/src/arrow/value.rs 
b/crates/iceberg/src/arrow/value.rs
index 0e0b85f07..bc123d99e 100644
--- a/crates/iceberg/src/arrow/value.rs
+++ b/crates/iceberg/src/arrow/value.rs
@@ -261,15 +261,15 @@ impl SchemaWithPartnerVisitor<ArrayRef> for 
ArrowArrayToIcebergStructConverter {
                             "The partner is not a decimal128 array",
                         )
                     })?;
-                if let DataType::Decimal128(arrow_precision, arrow_scale) = 
array.data_type() {
-                    if *arrow_precision as u32 != *precision || *arrow_scale 
as u32 != *scale {
-                        return Err(Error::new(
-                            ErrorKind::DataInvalid,
-                            format!(
-                                "The precision or scale 
({arrow_precision},{arrow_scale}) of arrow decimal128 array is not compatible 
with iceberg decimal type ({precision},{scale})"
-                            ),
-                        ));
-                    }
+                if let DataType::Decimal128(arrow_precision, arrow_scale) = 
array.data_type()
+                    && (*arrow_precision as u32 != *precision || *arrow_scale 
as u32 != *scale)
+                {
+                    return Err(Error::new(
+                        ErrorKind::DataInvalid,
+                        format!(
+                            "The precision or scale 
({arrow_precision},{arrow_scale}) of arrow decimal128 array is not compatible 
with iceberg decimal type ({precision},{scale})"
+                        ),
+                    ));
                 }
                 Ok(array.iter().map(|v| v.map(Literal::decimal)).collect())
             }
@@ -351,10 +351,10 @@ impl SchemaWithPartnerVisitor<ArrayRef> for 
ArrowArrayToIcebergStructConverter {
                 } else if let Some(array) = 
partner.as_any().downcast_ref::<StringArray>() {
                     Ok(array.iter().map(|v| v.map(Literal::string)).collect())
                 } else {
-                    return Err(Error::new(
+                    Err(Error::new(
                         ErrorKind::DataInvalid,
                         "The partner is not a string array",
-                    ));
+                    ))
                 }
             }
             PrimitiveType::Uuid => {
@@ -418,10 +418,10 @@ impl SchemaWithPartnerVisitor<ArrayRef> for 
ArrowArrayToIcebergStructConverter {
                         .map(|v| v.map(|v| Literal::binary(v.to_vec())))
                         .collect())
                 } else {
-                    return Err(Error::new(
+                    Err(Error::new(
                         ErrorKind::DataInvalid,
                         "The partner is not a binary array",
-                    ));
+                    ))
                 }
             }
         }
@@ -724,10 +724,7 @@ pub(crate) fn create_primitive_array_single_element(
         }
         _ => Err(Error::new(
             ErrorKind::Unexpected,
-            format!(
-                "Unsupported constant type combination: {:?} with {:?}",
-                data_type, prim_lit
-            ),
+            format!("Unsupported constant type combination: {data_type:?} with 
{prim_lit:?}"),
         )),
     }
 }
@@ -825,7 +822,7 @@ pub(crate) fn create_primitive_array_repeated(
         (dt, _) => {
             return Err(Error::new(
                 ErrorKind::Unexpected,
-                format!("unexpected target column type {}", dt),
+                format!("unexpected target column type {dt}"),
             ));
         }
     })
diff --git a/crates/iceberg/src/catalog/mod.rs 
b/crates/iceberg/src/catalog/mod.rs
index 27d5edaed..f3a521379 100644
--- a/crates/iceberg/src/catalog/mod.rs
+++ b/crates/iceberg/src/catalog/mod.rs
@@ -1000,13 +1000,13 @@ mod _serde_set_statistics {
             snapshot_id,
             statistics,
         } = SetStatistics::deserialize(deserializer)?;
-        if let Some(snapshot_id) = snapshot_id {
-            if snapshot_id != statistics.snapshot_id {
-                return Err(serde::de::Error::custom(format!(
-                    "Snapshot id to set {snapshot_id} does not match the 
statistics file snapshot id {}",
-                    statistics.snapshot_id
-                )));
-            }
+        if let Some(snapshot_id) = snapshot_id
+            && snapshot_id != statistics.snapshot_id
+        {
+            return Err(serde::de::Error::custom(format!(
+                "Snapshot id to set {snapshot_id} does not match the 
statistics file snapshot id {}",
+                statistics.snapshot_id
+            )));
         }
 
         Ok(statistics)
diff --git a/crates/iceberg/src/delete_vector.rs 
b/crates/iceberg/src/delete_vector.rs
index f382bf079..df8a10193 100644
--- a/crates/iceberg/src/delete_vector.rs
+++ b/crates/iceberg/src/delete_vector.rs
@@ -36,7 +36,7 @@ impl DeleteVector {
         }
     }
 
-    pub fn iter(&self) -> DeleteVectorIterator {
+    pub fn iter(&self) -> DeleteVectorIterator<'_> {
         let outer = self.inner.bitmaps();
         DeleteVectorIterator { outer, inner: None }
     }
@@ -93,10 +93,10 @@ impl Iterator for DeleteVectorIterator<'_> {
     type Item = u64;
 
     fn next(&mut self) -> Option<Self::Item> {
-        if let Some(inner) = &mut self.inner {
-            if let Some(inner_next) = inner.bitmap_iter.next() {
-                return Some(u64::from(inner.high_bits) << 32 | 
u64::from(inner_next));
-            }
+        if let Some(inner) = &mut self.inner
+            && let Some(inner_next) = inner.bitmap_iter.next()
+        {
+            return Some(u64::from(inner.high_bits) << 32 | 
u64::from(inner_next));
         }
 
         if let Some((high_bits, next_bitmap)) = self.outer.next() {
diff --git a/crates/iceberg/src/expr/visitors/manifest_evaluator.rs 
b/crates/iceberg/src/expr/visitors/manifest_evaluator.rs
index abbd136cb..770163ae9 100644
--- a/crates/iceberg/src/expr/visitors/manifest_evaluator.rs
+++ b/crates/iceberg/src/expr/visitors/manifest_evaluator.rs
@@ -161,10 +161,10 @@ impl BoundPredicateVisitor for ManifestFilterVisitor<'_> {
         _predicate: &BoundPredicate,
     ) -> crate::Result<bool> {
         let field = self.field_summary_for_reference(reference);
-        if let Some(contains_nan) = field.contains_nan {
-            if !contains_nan {
-                return ROWS_CANNOT_MATCH;
-            }
+        if let Some(contains_nan) = field.contains_nan
+            && !contains_nan
+        {
+            return ROWS_CANNOT_MATCH;
         }
 
         if ManifestFilterVisitor::are_all_null(field, 
&reference.field().field_type) {
@@ -389,16 +389,16 @@ impl BoundPredicateVisitor for ManifestFilterVisitor<'_> {
                 return ROWS_MIGHT_MATCH;
             }
 
-            if prefix.as_bytes().eq(&lower_bound[..prefix_len]) {
-                if let Some(upper_bound) = &field.upper_bound {
-                    // if upper is shorter than the prefix then upper can't 
start with the prefix
-                    if prefix_len > upper_bound.len() {
-                        return ROWS_MIGHT_MATCH;
-                    }
+            if prefix.as_bytes().eq(&lower_bound[..prefix_len])
+                && let Some(upper_bound) = &field.upper_bound
+            {
+                // if upper is shorter than the prefix then upper can't start 
with the prefix
+                if prefix_len > upper_bound.len() {
+                    return ROWS_MIGHT_MATCH;
+                }
 
-                    if prefix.as_bytes().eq(&upper_bound[..prefix_len]) {
-                        return ROWS_CANNOT_MATCH;
-                    }
+                if prefix.as_bytes().eq(&upper_bound[..prefix_len]) {
+                    return ROWS_CANNOT_MATCH;
                 }
             }
         }
diff --git a/crates/iceberg/src/expr/visitors/page_index_evaluator.rs 
b/crates/iceberg/src/expr/visitors/page_index_evaluator.rs
index 3745d94d1..ae3a90627 100644
--- a/crates/iceberg/src/expr/visitors/page_index_evaluator.rs
+++ b/crates/iceberg/src/expr/visitors/page_index_evaluator.rs
@@ -547,16 +547,16 @@ impl BoundPredicateVisitor for PageIndexEvaluator<'_> {
                     return Ok(false);
                 }
 
-                if let Some(min) = min {
-                    if min.gt(datum) {
-                        return Ok(false);
-                    }
+                if let Some(min) = min
+                    && min.gt(datum)
+                {
+                    return Ok(false);
                 }
 
-                if let Some(max) = max {
-                    if max.lt(datum) {
-                        return Ok(false);
-                    }
+                if let Some(max) = max
+                    && max.lt(datum)
+                {
+                    return Ok(false);
                 }
 
                 Ok(true)
diff --git a/crates/iceberg/src/expr/visitors/strict_metrics_evaluator.rs 
b/crates/iceberg/src/expr/visitors/strict_metrics_evaluator.rs
index e9bed775e..7c652e206 100644
--- a/crates/iceberg/src/expr/visitors/strict_metrics_evaluator.rs
+++ b/crates/iceberg/src/expr/visitors/strict_metrics_evaluator.rs
@@ -129,10 +129,10 @@ impl<'a> StrictMetricsEvaluator<'a> {
             self.upper_bound(field_id)
         };
 
-        if let Some(bound) = bound {
-            if cmp_fn(bound, datum) {
-                return ROWS_MUST_MATCH;
-            }
+        if let Some(bound) = bound
+            && cmp_fn(bound, datum)
+        {
+            return ROWS_MUST_MATCH;
         }
 
         ROWS_MIGHT_NOT_MATCH
@@ -219,10 +219,10 @@ impl BoundPredicateVisitor for StrictMetricsEvaluator<'_> 
{
     ) -> crate::Result<bool> {
         let field_id = reference.field().id;
 
-        if let Some(&nan_count) = self.nan_count(field_id) {
-            if nan_count == 0 {
-                return ROWS_MUST_MATCH;
-            }
+        if let Some(&nan_count) = self.nan_count(field_id)
+            && nan_count == 0
+        {
+            return ROWS_MUST_MATCH;
         }
 
         if self.contains_nulls_only(field_id) {
@@ -258,10 +258,10 @@ impl BoundPredicateVisitor for StrictMetricsEvaluator<'_> 
{
     ) -> crate::Result<bool> {
         let field_id = reference.field().id;
 
-        if let Some(lower) = self.lower_bound(field_id) {
-            if lower.is_nan() {
-                return ROWS_MIGHT_NOT_MATCH;
-            }
+        if let Some(lower) = self.lower_bound(field_id)
+            && lower.is_nan()
+        {
+            return ROWS_MIGHT_NOT_MATCH;
         }
 
         self.visit_inequality(reference, datum, PartialOrd::gt, true)
diff --git a/crates/iceberg/src/inspect/metadata_table.rs 
b/crates/iceberg/src/inspect/metadata_table.rs
index 92571db18..d5e9d6086 100644
--- a/crates/iceberg/src/inspect/metadata_table.rs
+++ b/crates/iceberg/src/inspect/metadata_table.rs
@@ -71,12 +71,12 @@ impl<'a> MetadataTable<'a> {
     }
 
     /// Get the snapshots table.
-    pub fn snapshots(&self) -> SnapshotsTable {
+    pub fn snapshots(&self) -> SnapshotsTable<'_> {
         SnapshotsTable::new(self.0)
     }
 
     /// Get the manifests table.
-    pub fn manifests(&self) -> ManifestsTable {
+    pub fn manifests(&self) -> ManifestsTable<'_> {
         ManifestsTable::new(self.0)
     }
 }
diff --git a/crates/iceberg/src/io/storage.rs b/crates/iceberg/src/io/storage.rs
index 5880ccca5..03e43600d 100644
--- a/crates/iceberg/src/io/storage.rs
+++ b/crates/iceberg/src/io/storage.rs
@@ -183,7 +183,7 @@ impl Storage {
                 } else {
                     Err(Error::new(
                         ErrorKind::DataInvalid,
-                        format!("Invalid gcs url: {}, should start with {}", 
path, prefix),
+                        format!("Invalid gcs url: {path}, should start with 
{prefix}"),
                     ))
                 }
             }
@@ -198,7 +198,7 @@ impl Storage {
                 } else {
                     Err(Error::new(
                         ErrorKind::DataInvalid,
-                        format!("Invalid oss url: {}, should start with {}", 
path, prefix),
+                        format!("Invalid oss url: {path}, should start with 
{prefix}"),
                     ))
                 }
             }
diff --git a/crates/iceberg/src/io/storage_azdls.rs 
b/crates/iceberg/src/io/storage_azdls.rs
index fe12167f6..5abb0cd6e 100644
--- a/crates/iceberg/src/io/storage_azdls.rs
+++ b/crates/iceberg/src/io/storage_azdls.rs
@@ -165,7 +165,7 @@ impl FromStr for AzureStorageScheme {
             "wasbs" => Ok(AzureStorageScheme::Wasbs),
             _ => Err(Error::new(
                 ErrorKind::DataInvalid,
-                format!("Unexpected Azure Storage scheme: {}", s),
+                format!("Unexpected Azure Storage scheme: {s}"),
             )),
         }
     }
@@ -397,11 +397,11 @@ mod tests {
             let config = azdls_config_parse(properties);
             match expected {
                 Some(expected_config) => {
-                    assert!(config.is_ok(), "Test case {} failed: {:?}", name, 
config);
-                    assert_eq!(config.unwrap(), expected_config, "Test case: 
{}", name);
+                    assert!(config.is_ok(), "Test case {name} failed: 
{config:?}");
+                    assert_eq!(config.unwrap(), expected_config, "Test case: 
{name}");
                 }
                 None => {
-                    assert!(config.is_err(), "Test case {} expected error.", 
name);
+                    assert!(config.is_err(), "Test case {name} expected 
error.");
                 }
             }
         }
@@ -495,14 +495,14 @@ mod tests {
             let result = azdls_create_operator(input.0, &input.1, &input.2);
             match expected {
                 Some((expected_filesystem, expected_path)) => {
-                    assert!(result.is_ok(), "Test case {} failed: {:?}", name, 
result);
+                    assert!(result.is_ok(), "Test case {name} failed: 
{result:?}");
 
                     let (op, relative_path) = result.unwrap();
                     assert_eq!(op.info().name(), expected_filesystem);
                     assert_eq!(relative_path, expected_path);
                 }
                 None => {
-                    assert!(result.is_err(), "Test case {} expected error.", 
name);
+                    assert!(result.is_err(), "Test case {name} expected 
error.");
                 }
             }
         }
@@ -543,11 +543,11 @@ mod tests {
             let result = input.parse::<AzureStoragePath>();
             match expected {
                 Some(expected_path) => {
-                    assert!(result.is_ok(), "Test case {} failed: {:?}", name, 
result);
-                    assert_eq!(result.unwrap(), expected_path, "Test case: 
{}", name);
+                    assert!(result.is_ok(), "Test case {name} failed: 
{result:?}");
+                    assert_eq!(result.unwrap(), expected_path, "Test case: 
{name}");
                 }
                 None => {
-                    assert!(result.is_err(), "Test case {} expected error.", 
name);
+                    assert!(result.is_err(), "Test case {name} expected 
error.");
                 }
             }
         }
@@ -593,7 +593,7 @@ mod tests {
 
         for (name, path, expected) in test_cases {
             let endpoint = path.as_endpoint();
-            assert_eq!(endpoint, expected, "Test case: {}", name);
+            assert_eq!(endpoint, expected, "Test case: {name}");
         }
     }
 }
diff --git a/crates/iceberg/src/io/storage_gcs.rs 
b/crates/iceberg/src/io/storage_gcs.rs
index 8c3d914c8..7718df603 100644
--- a/crates/iceberg/src/io/storage_gcs.rs
+++ b/crates/iceberg/src/io/storage_gcs.rs
@@ -71,20 +71,20 @@ pub(crate) fn gcs_config_parse(mut m: HashMap<String, 
String>) -> Result<GcsConf
         cfg.disable_config_load = true;
     }
 
-    if let Some(allow_anonymous) = m.remove(GCS_ALLOW_ANONYMOUS) {
-        if is_truthy(allow_anonymous.to_lowercase().as_str()) {
-            cfg.allow_anonymous = true;
-        }
+    if let Some(allow_anonymous) = m.remove(GCS_ALLOW_ANONYMOUS)
+        && is_truthy(allow_anonymous.to_lowercase().as_str())
+    {
+        cfg.allow_anonymous = true;
     }
-    if let Some(disable_ec2_metadata) = m.remove(GCS_DISABLE_VM_METADATA) {
-        if is_truthy(disable_ec2_metadata.to_lowercase().as_str()) {
-            cfg.disable_vm_metadata = true;
-        }
+    if let Some(disable_ec2_metadata) = m.remove(GCS_DISABLE_VM_METADATA)
+        && is_truthy(disable_ec2_metadata.to_lowercase().as_str())
+    {
+        cfg.disable_vm_metadata = true;
     };
-    if let Some(disable_config_load) = m.remove(GCS_DISABLE_CONFIG_LOAD) {
-        if is_truthy(disable_config_load.to_lowercase().as_str()) {
-            cfg.disable_config_load = true;
-        }
+    if let Some(disable_config_load) = m.remove(GCS_DISABLE_CONFIG_LOAD)
+        && is_truthy(disable_config_load.to_lowercase().as_str())
+    {
+        cfg.disable_config_load = true;
     };
 
     Ok(cfg)
@@ -96,7 +96,7 @@ pub(crate) fn gcs_config_build(cfg: &GcsConfig, path: &str) 
-> Result<Operator>
     let bucket = url.host_str().ok_or_else(|| {
         Error::new(
             ErrorKind::DataInvalid,
-            format!("Invalid gcs url: {}, bucket is required", path),
+            format!("Invalid gcs url: {path}, bucket is required"),
         )
     })?;
 
diff --git a/crates/iceberg/src/io/storage_oss.rs 
b/crates/iceberg/src/io/storage_oss.rs
index 8bfffc6ca..e82dda23a 100644
--- a/crates/iceberg/src/io/storage_oss.rs
+++ b/crates/iceberg/src/io/storage_oss.rs
@@ -56,7 +56,7 @@ pub(crate) fn oss_config_build(cfg: &OssConfig, path: &str) 
-> Result<Operator>
     let bucket = url.host_str().ok_or_else(|| {
         Error::new(
             ErrorKind::DataInvalid,
-            format!("Invalid oss url: {}, missing bucket", path),
+            format!("Invalid oss url: {path}, missing bucket"),
         )
     })?;
 
diff --git a/crates/iceberg/src/io/storage_s3.rs 
b/crates/iceberg/src/io/storage_s3.rs
index fcf9afed1..f069e0e2f 100644
--- a/crates/iceberg/src/io/storage_s3.rs
+++ b/crates/iceberg/src/io/storage_s3.rs
@@ -134,20 +134,20 @@ pub(crate) fn s3_config_parse(mut m: HashMap<String, 
String>) -> Result<S3Config
         }
     };
 
-    if let Some(allow_anonymous) = m.remove(S3_ALLOW_ANONYMOUS) {
-        if is_truthy(allow_anonymous.to_lowercase().as_str()) {
-            cfg.allow_anonymous = true;
-        }
+    if let Some(allow_anonymous) = m.remove(S3_ALLOW_ANONYMOUS)
+        && is_truthy(allow_anonymous.to_lowercase().as_str())
+    {
+        cfg.allow_anonymous = true;
     }
-    if let Some(disable_ec2_metadata) = m.remove(S3_DISABLE_EC2_METADATA) {
-        if is_truthy(disable_ec2_metadata.to_lowercase().as_str()) {
-            cfg.disable_ec2_metadata = true;
-        }
+    if let Some(disable_ec2_metadata) = m.remove(S3_DISABLE_EC2_METADATA)
+        && is_truthy(disable_ec2_metadata.to_lowercase().as_str())
+    {
+        cfg.disable_ec2_metadata = true;
     };
-    if let Some(disable_config_load) = m.remove(S3_DISABLE_CONFIG_LOAD) {
-        if is_truthy(disable_config_load.to_lowercase().as_str()) {
-            cfg.disable_config_load = true;
-        }
+    if let Some(disable_config_load) = m.remove(S3_DISABLE_CONFIG_LOAD)
+        && is_truthy(disable_config_load.to_lowercase().as_str())
+    {
+        cfg.disable_config_load = true;
     };
 
     Ok(cfg)
diff --git a/crates/iceberg/src/metadata_columns.rs 
b/crates/iceberg/src/metadata_columns.rs
index b11b5cadb..b388e40c4 100644
--- a/crates/iceberg/src/metadata_columns.rs
+++ b/crates/iceberg/src/metadata_columns.rs
@@ -74,14 +74,13 @@ pub fn get_metadata_field(field_id: i32) -> 
Result<NestedFieldRef> {
             Err(Error::new(
                 ErrorKind::Unexpected,
                 format!(
-                    "Metadata field ID {} recognized but field definition not 
implemented",
-                    field_id
+                    "Metadata field ID {field_id} recognized but field 
definition not implemented"
                 ),
             ))
         }
         _ => Err(Error::new(
             ErrorKind::Unexpected,
-            format!("Field ID {} is not a metadata field", field_id),
+            format!("Field ID {field_id} is not a metadata field"),
         )),
     }
 }
diff --git a/crates/iceberg/src/scan/mod.rs b/crates/iceberg/src/scan/mod.rs
index 24c03b0b2..d83da8a87 100644
--- a/crates/iceberg/src/scan/mod.rs
+++ b/crates/iceberg/src/scan/mod.rs
@@ -1872,8 +1872,7 @@ pub mod tests {
         let file_path = string_values.value(0);
         assert!(
             file_path.ends_with(".parquet"),
-            "File path should end with .parquet, got: {}",
-            file_path
+            "File path should end with .parquet, got: {file_path}"
         );
     }
 
@@ -1981,8 +1980,7 @@ pub mod tests {
         for path in &file_paths {
             assert!(
                 path.ends_with(".parquet"),
-                "All file paths should end with .parquet, got: {}",
-                path
+                "All file paths should end with .parquet, got: {path}"
             );
         }
     }
diff --git a/crates/iceberg/src/spec/datatypes.rs 
b/crates/iceberg/src/spec/datatypes.rs
index 456b75440..037946558 100644
--- a/crates/iceberg/src/spec/datatypes.rs
+++ b/crates/iceberg/src/spec/datatypes.rs
@@ -427,8 +427,7 @@ impl<'de> Deserialize<'de> for StructType {
                             let type_val: String = map.next_value()?;
                             if type_val != "struct" {
                                 return Err(serde::de::Error::custom(format!(
-                                    "expected type 'struct', got '{}'",
-                                    type_val
+                                    "expected type 'struct', got '{type_val}'"
                                 )));
                             }
                         }
diff --git a/crates/iceberg/src/spec/manifest/writer.rs 
b/crates/iceberg/src/spec/manifest/writer.rs
index ebb0590bc..389ac7a1f 100644
--- a/crates/iceberg/src/spec/manifest/writer.rs
+++ b/crates/iceberg/src/spec/manifest/writer.rs
@@ -388,10 +388,10 @@ impl ManifestWriter {
                 self.existing_rows += entry.data_file.record_count;
             }
         }
-        if entry.is_alive() {
-            if let Some(seq_num) = entry.sequence_number {
-                self.min_seq_num = Some(self.min_seq_num.map_or(seq_num, |v| 
min(v, seq_num)));
-            }
+        if entry.is_alive()
+            && let Some(seq_num) = entry.sequence_number
+        {
+            self.min_seq_num = Some(self.min_seq_num.map_or(seq_num, |v| 
min(v, seq_num)));
         }
         self.manifest_entries.push(entry);
         Ok(())
diff --git a/crates/iceberg/src/spec/schema/prune_columns.rs 
b/crates/iceberg/src/spec/schema/prune_columns.rs
index 5a2f0b50f..14f1bfd25 100644
--- a/crates/iceberg/src/spec/schema/prune_columns.rs
+++ b/crates/iceberg/src/spec/schema/prune_columns.rs
@@ -110,19 +110,19 @@ impl SchemaVisitor for PruneColumn {
             if self.select_full_types {
                 Ok(Some(*field.field_type.clone()))
             } else if field.field_type.is_struct() {
-                return 
Ok(Some(Type::Struct(PruneColumn::project_selected_struct(
+                Ok(Some(Type::Struct(PruneColumn::project_selected_struct(
                     value,
-                )?)));
+                )?)))
             } else if !field.field_type.is_nested() {
-                return Ok(Some(*field.field_type.clone()));
+                Ok(Some(*field.field_type.clone()))
             } else {
-                return Err(Error::new(
+                Err(Error::new(
                     ErrorKind::DataInvalid,
                     "Can't project list or map field directly when not 
selecting full type."
                         .to_string(),
                 )
                 .with_context("field_id", field.id.to_string())
-                .with_context("field_type", field.field_type.to_string()));
+                .with_context("field_type", field.field_type.to_string()))
             }
         } else {
             Ok(value)
@@ -174,20 +174,20 @@ impl SchemaVisitor for PruneColumn {
                 Ok(Some(Type::List(list.clone())))
             } else if list.element_field.field_type.is_struct() {
                 let projected_struct = 
PruneColumn::project_selected_struct(value).unwrap();
-                return Ok(Some(Type::List(PruneColumn::project_list(
+                Ok(Some(Type::List(PruneColumn::project_list(
                     list,
                     Type::Struct(projected_struct),
-                )?)));
+                )?)))
             } else if list.element_field.field_type.is_primitive() {
-                return Ok(Some(Type::List(list.clone())));
+                Ok(Some(Type::List(list.clone())))
             } else {
-                return Err(Error::new(
+                Err(Error::new(
                     ErrorKind::DataInvalid,
                     format!(
                         "Cannot explicitly project List or Map types, List 
element {} of type {} was selected",
                         list.element_field.id, list.element_field.field_type
                     ),
-                ));
+                ))
             }
         } else if let Some(result) = value {
             Ok(Some(Type::List(PruneColumn::project_list(list, result)?)))
@@ -208,26 +208,26 @@ impl SchemaVisitor for PruneColumn {
             } else if map.value_field.field_type.is_struct() {
                 let projected_struct =
                     
PruneColumn::project_selected_struct(Some(value.unwrap())).unwrap();
-                return Ok(Some(Type::Map(PruneColumn::project_map(
+                Ok(Some(Type::Map(PruneColumn::project_map(
                     map,
                     Type::Struct(projected_struct),
-                )?)));
+                )?)))
             } else if map.value_field.field_type.is_primitive() {
-                return Ok(Some(Type::Map(map.clone())));
+                Ok(Some(Type::Map(map.clone())))
             } else {
-                return Err(Error::new(
+                Err(Error::new(
                     ErrorKind::DataInvalid,
                     format!(
                         "Cannot explicitly project List or Map types, Map 
value {} of type {} was selected",
                         map.value_field.id, map.value_field.field_type
                     ),
-                ));
+                ))
             }
         } else if let Some(value_result) = value {
-            return Ok(Some(Type::Map(PruneColumn::project_map(
+            Ok(Some(Type::Map(PruneColumn::project_map(
                 map,
                 value_result,
-            )?)));
+            )?)))
         } else if self.selected.contains(&map.key_field.id) {
             Ok(Some(Type::Map(map.clone())))
         } else {
diff --git a/crates/iceberg/src/spec/table_metadata.rs 
b/crates/iceberg/src/spec/table_metadata.rs
index 06b32cc84..48b715da5 100644
--- a/crates/iceberg/src/spec/table_metadata.rs
+++ b/crates/iceberg/src/spec/table_metadata.rs
@@ -390,18 +390,18 @@ impl TableMetadata {
     }
 
     fn construct_refs(&mut self) {
-        if let Some(current_snapshot_id) = self.current_snapshot_id {
-            if !self.refs.contains_key(MAIN_BRANCH) {
-                self.refs
-                    .insert(MAIN_BRANCH.to_string(), SnapshotReference {
-                        snapshot_id: current_snapshot_id,
-                        retention: SnapshotRetention::Branch {
-                            min_snapshots_to_keep: None,
-                            max_snapshot_age_ms: None,
-                            max_ref_age_ms: None,
-                        },
-                    });
-            }
+        if let Some(current_snapshot_id) = self.current_snapshot_id
+            && !self.refs.contains_key(MAIN_BRANCH)
+        {
+            self.refs
+                .insert(MAIN_BRANCH.to_string(), SnapshotReference {
+                    snapshot_id: current_snapshot_id,
+                    retention: SnapshotRetention::Branch {
+                        min_snapshots_to_keep: None,
+                        max_snapshot_age_ms: None,
+                        max_ref_age_ms: None,
+                    },
+                });
         }
     }
 
@@ -572,17 +572,17 @@ impl TableMetadata {
 
         let main_ref = self.refs.get(MAIN_BRANCH);
         if self.current_snapshot_id.is_some() {
-            if let Some(main_ref) = main_ref {
-                if main_ref.snapshot_id != 
self.current_snapshot_id.unwrap_or_default() {
-                    return Err(Error::new(
-                        ErrorKind::DataInvalid,
-                        format!(
-                            "Current snapshot id does not match main branch 
({:?} != {:?})",
-                            self.current_snapshot_id.unwrap_or_default(),
-                            main_ref.snapshot_id
-                        ),
-                    ));
-                }
+            if let Some(main_ref) = main_ref
+                && main_ref.snapshot_id != 
self.current_snapshot_id.unwrap_or_default()
+            {
+                return Err(Error::new(
+                    ErrorKind::DataInvalid,
+                    format!(
+                        "Current snapshot id does not match main branch ({:?} 
!= {:?})",
+                        self.current_snapshot_id.unwrap_or_default(),
+                        main_ref.snapshot_id
+                    ),
+                ));
             }
         } else if main_ref.is_some() {
             return Err(Error::new(
@@ -606,22 +606,21 @@ impl TableMetadata {
             ));
         }
 
-        if self.format_version >= FormatVersion::V2 {
-            if let Some(snapshot) = self
+        if self.format_version >= FormatVersion::V2
+            && let Some(snapshot) = self
                 .snapshots
                 .values()
                 .find(|snapshot| snapshot.sequence_number() > 
self.last_sequence_number)
-            {
-                return Err(Error::new(
-                    ErrorKind::DataInvalid,
-                    format!(
-                        "Invalid snapshot with id {} and sequence number {} 
greater than last sequence number {}",
-                        snapshot.snapshot_id(),
-                        snapshot.sequence_number(),
-                        self.last_sequence_number
-                    ),
-                ));
-            }
+        {
+            return Err(Error::new(
+                ErrorKind::DataInvalid,
+                format!(
+                    "Invalid snapshot with id {} and sequence number {} 
greater than last sequence number {}",
+                    snapshot.snapshot_id(),
+                    snapshot.sequence_number(),
+                    self.last_sequence_number
+                ),
+            ));
         }
 
         Ok(())
diff --git a/crates/iceberg/src/spec/transform.rs 
b/crates/iceberg/src/spec/transform.rs
index 6068716ef..354dc1889 100644
--- a/crates/iceberg/src/spec/transform.rs
+++ b/crates/iceberg/src/spec/transform.rs
@@ -711,10 +711,10 @@ impl Transform {
             PredicateOperator::GreaterThan => 
Some(PredicateOperator::GreaterThanOrEq),
             PredicateOperator::StartsWith => match datum.literal() {
                 PrimitiveLiteral::String(s) => {
-                    if let Some(w) = width {
-                        if s.len() == w as usize {
-                            return Some(PredicateOperator::Eq);
-                        };
+                    if let Some(w) = width
+                        && s.len() == w as usize
+                    {
+                        return Some(PredicateOperator::Eq);
                     };
                     Some(*op)
                 }
@@ -757,47 +757,45 @@ impl Transform {
             _ => false,
         };
 
-        if should_adjust {
-            if let &PrimitiveLiteral::Int(v) = transformed.literal() {
-                match op {
-                    PredicateOperator::LessThan
-                    | PredicateOperator::LessThanOrEq
-                    | PredicateOperator::In => {
-                        if v < 0 {
+        if should_adjust && let &PrimitiveLiteral::Int(v) = 
transformed.literal() {
+            match op {
+                PredicateOperator::LessThan
+                | PredicateOperator::LessThanOrEq
+                | PredicateOperator::In => {
+                    if v < 0 {
+                        // # TODO
+                        // An ugly hack to fix. Refine the increment and 
decrement logic later.
+                        match self {
+                            Transform::Day => {
+                                return 
Some(AdjustedProjection::Single(Datum::date(v + 1)));
+                            }
+                            _ => {
+                                return 
Some(AdjustedProjection::Single(Datum::int(v + 1)));
+                            }
+                        }
+                    };
+                }
+                PredicateOperator::Eq => {
+                    if v < 0 {
+                        let new_set = FnvHashSet::from_iter(vec![
+                            transformed.to_owned(),
                             // # TODO
                             // An ugly hack to fix. Refine the increment and 
decrement logic later.
-                            match self {
-                                Transform::Day => {
-                                    return 
Some(AdjustedProjection::Single(Datum::date(v + 1)));
+                            {
+                                match self {
+                                    Transform::Day => Datum::date(v + 1),
+                                    _ => Datum::int(v + 1),
                                 }
-                                _ => {
-                                    return 
Some(AdjustedProjection::Single(Datum::int(v + 1)));
-                                }
-                            }
-                        };
-                    }
-                    PredicateOperator::Eq => {
-                        if v < 0 {
-                            let new_set = FnvHashSet::from_iter(vec![
-                                transformed.to_owned(),
-                                // # TODO
-                                // An ugly hack to fix. Refine the increment 
and decrement logic later.
-                                {
-                                    match self {
-                                        Transform::Day => Datum::date(v + 1),
-                                        _ => Datum::int(v + 1),
-                                    }
-                                },
-                            ]);
-                            return Some(AdjustedProjection::Set(new_set));
-                        }
-                    }
-                    _ => {
-                        return None;
+                            },
+                        ]);
+                        return Some(AdjustedProjection::Set(new_set));
                     }
                 }
-            };
-        }
+                _ => {
+                    return None;
+                }
+            }
+        };
         None
     }
 
diff --git a/crates/iceberg/src/spec/values/tests.rs 
b/crates/iceberg/src/spec/values/tests.rs
index 0e99d44df..73343a9a1 100644
--- a/crates/iceberg/src/spec/values/tests.rs
+++ b/crates/iceberg/src/spec/values/tests.rs
@@ -447,7 +447,7 @@ fn check_raw_literal_bytes_error_via_avro(input_bytes: 
Vec<u8>, expected_type: &
     let avro_value = Value::Bytes(input_bytes);
     let raw_literal: _serde::RawLiteral = 
apache_avro::from_value(&avro_value).unwrap();
     let result = raw_literal.try_into(expected_type);
-    assert!(result.is_err(), "Expected error but got: {:?}", result);
+    assert!(result.is_err(), "Expected error but got: {result:?}");
 }
 
 #[test]
diff --git a/crates/iceberg/src/spec/view_metadata_builder.rs 
b/crates/iceberg/src/spec/view_metadata_builder.rs
index 9f542a7c6..38041ca62 100644
--- a/crates/iceberg/src/spec/view_metadata_builder.rs
+++ b/crates/iceberg/src/spec/view_metadata_builder.rs
@@ -478,10 +478,10 @@ impl ViewMetadataBuilder {
         // as it might panic if the metadata is invalid.
         self.metadata.validate()?;
 
-        if let Some(previous) = self.previous_view_version.take() {
-            if !allow_replace_drop_dialects(&self.metadata.properties) {
-                require_no_dialect_dropped(&previous, 
self.metadata.current_version())?;
-            }
+        if let Some(previous) = self.previous_view_version.take()
+            && !allow_replace_drop_dialects(&self.metadata.properties)
+        {
+            require_no_dialect_dropped(&previous, 
self.metadata.current_version())?;
         }
 
         let _expired_versions = self.expire_versions();
diff --git a/crates/iceberg/src/transaction/mod.rs 
b/crates/iceberg/src/transaction/mod.rs
index 4116264a1..8ddaa2669 100644
--- a/crates/iceberg/src/transaction/mod.rs
+++ b/crates/iceberg/src/transaction/mod.rs
@@ -518,7 +518,7 @@ mod test_row_lineage {
         fn file_with_rows(record_count: u64) -> DataFile {
             DataFileBuilder::default()
                 .content(DataContentType::Data)
-                .file_path(format!("test/{}.parquet", record_count))
+                .file_path(format!("test/{record_count}.parquet"))
                 .file_format(DataFileFormat::Parquet)
                 .file_size_in_bytes(100)
                 .record_count(record_count)
diff --git a/crates/iceberg/src/transaction/snapshot.rs 
b/crates/iceberg/src/transaction/snapshot.rs
index d59828ce3..c8bf26a17 100644
--- a/crates/iceberg/src/transaction/snapshot.rs
+++ b/crates/iceberg/src/transaction/snapshot.rs
@@ -276,13 +276,13 @@ impl<'a> SnapshotProducer<'a> {
                     "Partition field should only be primitive type.",
                 )
             })?;
-            if let Some(value) = value {
-                if !field.compatible(&value.as_primitive_literal().unwrap()) {
-                    return Err(Error::new(
-                        ErrorKind::DataInvalid,
-                        "Partition value is not compatible partition type",
-                    ));
-                }
+            if let Some(value) = value
+                && !field.compatible(&value.as_primitive_literal().unwrap())
+            {
+                return Err(Error::new(
+                    ErrorKind::DataInvalid,
+                    "Partition value is not compatible partition type",
+                ));
             }
         }
         Ok(())
diff --git a/crates/iceberg/src/writer/file_writer/rolling_writer.rs 
b/crates/iceberg/src/writer/file_writer/rolling_writer.rs
index 06246ab66..a93e494d4 100644
--- a/crates/iceberg/src/writer/file_writer/rolling_writer.rs
+++ b/crates/iceberg/src/writer/file_writer/rolling_writer.rs
@@ -197,18 +197,18 @@ where
             );
         }
 
-        if self.should_roll() {
-            if let Some(inner) = self.inner.take() {
-                // close the current writer, roll to a new file
-                self.data_file_builders.extend(inner.close().await?);
-
-                // start a new writer
-                self.inner = Some(
-                    self.inner_builder
-                        .build(self.new_output_file(partition_key)?)
-                        .await?,
-                );
-            }
+        if self.should_roll()
+            && let Some(inner) = self.inner.take()
+        {
+            // close the current writer, roll to a new file
+            self.data_file_builders.extend(inner.close().await?);
+
+            // start a new writer
+            self.inner = Some(
+                self.inner_builder
+                    .build(self.new_output_file(partition_key)?)
+                    .await?,
+            );
         }
 
         // write the input
diff --git a/crates/iceberg/tests/file_io_gcs_test.rs 
b/crates/iceberg/tests/file_io_gcs_test.rs
index 161285ae6..9fbcdadd0 100644
--- a/crates/iceberg/tests/file_io_gcs_test.rs
+++ b/crates/iceberg/tests/file_io_gcs_test.rs
@@ -68,7 +68,7 @@ mod tests {
 
         FileIOBuilder::new("gcs")
             .with_props(vec![
-                (GCS_SERVICE_PATH, format!("http://{}", addr)),
+                (GCS_SERVICE_PATH, format!("http://{addr}")),
                 (GCS_NO_AUTH, "true".to_string()),
             ])
             .build()
@@ -81,13 +81,13 @@ mod tests {
         bucket_data.insert("name", name);
 
         let client = reqwest::Client::new();
-        let endpoint = format!("http://{}/storage/v1/b", server_addr);
+        let endpoint = format!("http://{server_addr}/storage/v1/b");
         client.post(endpoint).json(&bucket_data).send().await?;
         Ok(())
     }
 
     fn get_gs_path() -> String {
-        format!("gs://{}", FAKE_GCS_BUCKET)
+        format!("gs://{FAKE_GCS_BUCKET}")
     }
 
     #[tokio::test]
diff --git a/crates/integrations/datafusion/src/physical_plan/repartition.rs 
b/crates/integrations/datafusion/src/physical_plan/repartition.rs
index 8ad87fd1c..2d1d7f862 100644
--- a/crates/integrations/datafusion/src/physical_plan/repartition.rs
+++ b/crates/integrations/datafusion/src/physical_plan/repartition.rs
@@ -159,9 +159,8 @@ fn determine_partitioning_strategy(
 
         // Case 2: Partitioned table missing _partition column (normally this 
should not happen)
         (true, Err(_)) => Err(DataFusionError::Plan(format!(
-            "Partitioned table input missing {} column. \
-             Ensure projection happens before repartitioning.",
-            PROJECTED_PARTITION_VALUE_COLUMN
+            "Partitioned table input missing 
{PROJECTED_PARTITION_VALUE_COLUMN} column. \
+             Ensure projection happens before repartitioning."
         ))),
 
         // Case 3: Unpartitioned table, always use RoundRobinBatch
@@ -508,8 +507,7 @@ mod tests {
 
                 assert!(
                     
column_names.contains(&PROJECTED_PARTITION_VALUE_COLUMN.to_string()),
-                    "Should use _partition column, got: {:?}",
-                    column_names
+                    "Should use _partition column, got: {column_names:?}"
                 );
             }
             _ => panic!("Expected Hash partitioning with Identity transform"),
@@ -733,8 +731,7 @@ mod tests {
                     .collect();
                 assert!(
                     
column_names.contains(&PROJECTED_PARTITION_VALUE_COLUMN.to_string()),
-                    "Should use _partition column for mixed transforms with 
Identity, got: {:?}",
-                    column_names
+                    "Should use _partition column for mixed transforms with 
Identity, got: {column_names:?}"
                 );
             }
             _ => panic!("Expected Hash partitioning for table with identity 
transforms"),
diff --git a/crates/integrations/datafusion/src/physical_plan/sort.rs 
b/crates/integrations/datafusion/src/physical_plan/sort.rs
index 2a57e16e4..ede254753 100644
--- a/crates/integrations/datafusion/src/physical_plan/sort.rs
+++ b/crates/integrations/datafusion/src/physical_plan/sort.rs
@@ -53,8 +53,7 @@ pub(crate) fn sort_by_partition(input: Arc<dyn 
ExecutionPlan>) -> DFResult<Arc<d
         .column_with_name(PROJECTED_PARTITION_VALUE_COLUMN)
         .ok_or_else(|| {
             DataFusionError::Plan(format!(
-                "Partition column '{}' not found in schema. Ensure the plan 
has been extended with partition values using project_with_partition.",
-                PROJECTED_PARTITION_VALUE_COLUMN
+                "Partition column '{PROJECTED_PARTITION_VALUE_COLUMN}' not 
found in schema. Ensure the plan has been extended with partition values using 
project_with_partition."
             ))
         })?;
 
diff --git a/crates/integrations/datafusion/src/table/mod.rs 
b/crates/integrations/datafusion/src/table/mod.rs
index 8527668d6..86a79611b 100644
--- a/crates/integrations/datafusion/src/table/mod.rs
+++ b/crates/integrations/datafusion/src/table/mod.rs
@@ -379,7 +379,7 @@ mod tests {
 
         let table_creation = TableCreation::builder()
             .name("test_table".to_string())
-            .location(format!("{}/test_table", warehouse_path))
+            .location(format!("{warehouse_path}/test_table"))
             .schema(schema)
             .properties(HashMap::new())
             .build();
diff --git 
a/crates/integrations/datafusion/tests/integration_datafusion_test.rs 
b/crates/integrations/datafusion/tests/integration_datafusion_test.rs
index 3ad84f383..06d9cab03 100644
--- a/crates/integrations/datafusion/tests/integration_datafusion_test.rs
+++ b/crates/integrations/datafusion/tests/integration_datafusion_test.rs
@@ -923,25 +923,22 @@ async fn test_insert_into_partitioned() -> Result<()> {
     let file_io = table.file_io();
 
     // List files under each expected partition path
-    let electronics_path = format!("{}/data/category=electronics", 
table_location);
-    let books_path = format!("{}/data/category=books", table_location);
-    let clothing_path = format!("{}/data/category=clothing", table_location);
+    let electronics_path = 
format!("{table_location}/data/category=electronics");
+    let books_path = format!("{table_location}/data/category=books");
+    let clothing_path = format!("{table_location}/data/category=clothing");
 
     // Verify partition directories exist and contain data files
     assert!(
         file_io.exists(&electronics_path).await?,
-        "Expected partition directory: {}",
-        electronics_path
+        "Expected partition directory: {electronics_path}"
     );
     assert!(
         file_io.exists(&books_path).await?,
-        "Expected partition directory: {}",
-        books_path
+        "Expected partition directory: {books_path}"
     );
     assert!(
         file_io.exists(&clothing_path).await?,
-        "Expected partition directory: {}",
-        clothing_path
+        "Expected partition directory: {clothing_path}"
     );
 
     Ok(())
diff --git a/rust-toolchain.toml b/rust-toolchain.toml
index ff7d1f7fb..4b20d68e4 100644
--- a/rust-toolchain.toml
+++ b/rust-toolchain.toml
@@ -20,5 +20,5 @@
 #
 # The channel is exactly same day for our MSRV.
 [toolchain]
-channel = "nightly-2025-03-28"
+channel = "nightly-2025-06-23"
 components = ["rustfmt", "clippy"]

Reply via email to