This is an automated email from the ASF dual-hosted git repository.

alamb pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/datafusion.git


The following commit(s) were added to refs/heads/main by this push:
     new 563da9290f Enable `used_underscore_binding` clippy lint (#15189)
563da9290f is described below

commit 563da9290ffe579e5e5f15c85cdb2fa25025d716
Author: Shreyas (Lua) <[email protected]>
AuthorDate: Sat Mar 15 05:08:14 2025 -0700

    Enable `used_underscore_binding` clippy lint (#15189)
    
    * datafusion-sql underscore-binding resolved
    
    * datafusion-optimizer underscore-binding resolved
    
    * datafusion-physical-plan underscore-binding resolved
    
    * datafusion-physical-optimizer underscore-binding resolved
    
    * Update /physical-plan/...../nested_loop_join.rs to expect dead_code
    
    Co-authored-by: Andrew Lamb <[email protected]>
    
    * correcting formatting a bit more
    
    ---------
    
    Co-authored-by: Andrew Lamb <[email protected]>
---
 Cargo.toml                                                | 1 +
 datafusion/core/src/datasource/stream.rs                  | 4 ++--
 datafusion/optimizer/src/eliminate_limit.rs               | 1 +
 datafusion/optimizer/src/push_down_filter.rs              | 1 +
 datafusion/optimizer/src/push_down_limit.rs               | 1 +
 datafusion/physical-optimizer/src/aggregate_statistics.rs | 6 +++---
 datafusion/physical-plan/src/joins/nested_loop_join.rs    | 8 +++++---
 datafusion/sql/src/unparser/dialect.rs                    | 4 ++--
 8 files changed, 16 insertions(+), 10 deletions(-)

diff --git a/Cargo.toml b/Cargo.toml
index 0f55d2e100..5a9088fe2e 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -198,6 +198,7 @@ incremental = false
 [workspace.lints.clippy]
 # Detects large stack-allocated futures that may cause stack overflow crashes (see threshold in clippy.toml)
 large_futures = "warn"
+used_underscore_binding = "warn"
 
 [workspace.lints.rust]
 unexpected_cfgs = { level = "warn", check-cfg = ["cfg(tarpaulin)"] }
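
For context, `used_underscore_binding` warns whenever a binding whose name starts with an underscore is actually used, since the underscore prefix conventionally marks a value as intentionally unused. Below is a minimal illustrative sketch (not part of this commit) of the pattern the lint flags and the rename-style fix applied in the files that follow:

    #![warn(clippy::used_underscore_binding)]

    fn scale(_factor: f64, value: f64) -> f64 {
        // The lint fires here: `_factor` is used despite its underscore prefix.
        // Renaming the parameter to `factor` (as this commit does for several
        // bindings) resolves the warning.
        value * _factor
    }

    fn main() {
        println!("{}", scale(2.0, 21.0));
    }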
diff --git a/datafusion/core/src/datasource/stream.rs b/datafusion/core/src/datasource/stream.rs
index d5fe070be8..ffb4860544 100644
--- a/datafusion/core/src/datasource/stream.rs
+++ b/datafusion/core/src/datasource/stream.rs
@@ -400,8 +400,8 @@ impl PartitionStream for StreamRead {
 struct StreamWrite(Arc<StreamConfig>);
 
 impl DisplayAs for StreamWrite {
-    fn fmt_as(&self, _t: DisplayFormatType, f: &mut Formatter) -> std::fmt::Result {
-        self.0.source.stream_write_display(_t, f)
+    fn fmt_as(&self, t: DisplayFormatType, f: &mut Formatter) -> std::fmt::Result {
+        self.0.source.stream_write_display(t, f)
     }
 }
 
diff --git a/datafusion/optimizer/src/eliminate_limit.rs b/datafusion/optimizer/src/eliminate_limit.rs
index 267615c3e0..5d3a1b223b 100644
--- a/datafusion/optimizer/src/eliminate_limit.rs
+++ b/datafusion/optimizer/src/eliminate_limit.rs
@@ -77,6 +77,7 @@ impl OptimizerRule for EliminateLimit {
                 } else if matches!(limit.get_skip_type()?, SkipType::Literal(0)) {
                     // If fetch is `None` and skip is 0, then Limit takes no effect and
                     // we can remove it. Its input also can be Limit, so we should apply again.
+                    #[allow(clippy::used_underscore_binding)]
                     return self.rewrite(Arc::unwrap_or_clone(limit.input), _config);
                 }
                 Ok(Transformed::no(LogicalPlan::Limit(limit)))
diff --git a/datafusion/optimizer/src/push_down_filter.rs b/datafusion/optimizer/src/push_down_filter.rs
index 0dbb78a268..c9617514e4 100644
--- a/datafusion/optimizer/src/push_down_filter.rs
+++ b/datafusion/optimizer/src/push_down_filter.rs
@@ -799,6 +799,7 @@ impl OptimizerRule for PushDownFilter {
                     new_predicate,
                     child_filter.input,
                 )?);
+                #[allow(clippy::used_underscore_binding)]
                 self.rewrite(new_filter, _config)
             }
             LogicalPlan::Repartition(repartition) => {
diff --git a/datafusion/optimizer/src/push_down_limit.rs b/datafusion/optimizer/src/push_down_limit.rs
index 4da112d515..04ff943472 100644
--- a/datafusion/optimizer/src/push_down_limit.rs
+++ b/datafusion/optimizer/src/push_down_limit.rs
@@ -82,6 +82,7 @@ impl OptimizerRule for PushDownLimit {
             });
 
             // recursively reapply the rule on the new plan
+            #[allow(clippy::used_underscore_binding)]
             return self.rewrite(plan, _config);
         }
 
diff --git a/datafusion/physical-optimizer/src/aggregate_statistics.rs b/datafusion/physical-optimizer/src/aggregate_statistics.rs
index a9b02188a7..0d3d83c583 100644
--- a/datafusion/physical-optimizer/src/aggregate_statistics.rs
+++ b/datafusion/physical-optimizer/src/aggregate_statistics.rs
@@ -45,7 +45,7 @@ impl PhysicalOptimizerRule for AggregateStatistics {
     fn optimize(
         &self,
         plan: Arc<dyn ExecutionPlan>,
-        _config: &ConfigOptions,
+        config: &ConfigOptions,
     ) -> Result<Arc<dyn ExecutionPlan>> {
         if let Some(partial_agg_exec) = take_optimizable(&*plan) {
             let partial_agg_exec = partial_agg_exec
@@ -83,12 +83,12 @@ impl PhysicalOptimizerRule for AggregateStatistics {
                 )?))
             } else {
                 plan.map_children(|child| {
-                    self.optimize(child, _config).map(Transformed::yes)
+                    self.optimize(child, config).map(Transformed::yes)
                 })
                 .data()
             }
         } else {
-            plan.map_children(|child| self.optimize(child, _config).map(Transformed::yes))
+            plan.map_children(|child| self.optimize(child, config).map(Transformed::yes))
                 .data()
         }
     }
diff --git a/datafusion/physical-plan/src/joins/nested_loop_join.rs b/datafusion/physical-plan/src/joins/nested_loop_join.rs
index f6fa8878e0..88d3ea9e7e 100644
--- a/datafusion/physical-plan/src/joins/nested_loop_join.rs
+++ b/datafusion/physical-plan/src/joins/nested_loop_join.rs
@@ -75,7 +75,9 @@ struct JoinLeftData {
     probe_threads_counter: AtomicUsize,
     /// Memory reservation for tracking batch and bitmap
     /// Cleared on `JoinLeftData` drop
-    _reservation: MemoryReservation,
+    /// reservation is cleared on Drop
+    #[expect(dead_code)]
+    reservation: MemoryReservation,
 }
 
 impl JoinLeftData {
@@ -83,13 +85,13 @@ impl JoinLeftData {
         batch: RecordBatch,
         bitmap: SharedBitmapBuilder,
         probe_threads_counter: AtomicUsize,
-        _reservation: MemoryReservation,
+        reservation: MemoryReservation,
     ) -> Self {
         Self {
             batch,
             bitmap,
             probe_threads_counter,
-            _reservation,
+            reservation,
         }
     }
 
diff --git a/datafusion/sql/src/unparser/dialect.rs b/datafusion/sql/src/unparser/dialect.rs
index 399f0df0a6..77d58de792 100644
--- a/datafusion/sql/src/unparser/dialect.rs
+++ b/datafusion/sql/src/unparser/dialect.rs
@@ -898,8 +898,8 @@ impl CustomDialectBuilder {
         self
     }
 
-    pub fn with_unnest_as_table_factor(mut self, _unnest_as_table_factor: bool) -> Self {
-        self.unnest_as_table_factor = _unnest_as_table_factor;
+    pub fn with_unnest_as_table_factor(mut self, unnest_as_table_factor: bool) -> Self {
+        self.unnest_as_table_factor = unnest_as_table_factor;
         self
     }
 }
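
A side note on the nested_loop_join.rs hunk above: `#[expect(dead_code)]` behaves like `#[allow(dead_code)]`, but additionally warns if the expected lint never fires, so the suppression cannot silently go stale. A minimal illustrative sketch (not from this commit) of a field kept only for its drop side effect, mirroring the `reservation` field in `JoinLeftData`:

    struct LeftSideData {
        rows: Vec<u64>,
        // Never read after construction; without the attribute, the
        // `dead_code` lint fires on this field.
        #[expect(dead_code)]
        reservation: String,
    }

    fn main() {
        let data = LeftSideData {
            rows: vec![1, 2, 3],
            reservation: String::from("16 KiB"),
        };
        println!("{} rows buffered", data.rows.len());
    }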


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
