This is an automated email from the ASF dual-hosted git repository.

dheres pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/arrow-datafusion.git


The following commit(s) were added to refs/heads/master by this push:
     new e1e7b86  Address clippy warnings (#1553)
e1e7b86 is described below

commit e1e7b8651f0aedc93fbf830daef82457f32bf0c2
Author: Sergey Melnychuk <[email protected]>
AuthorDate: Wed Jan 12 21:39:34 2022 +0100

    Address clippy warnings (#1553)
    
    * Fix 'new_without_default' clippy warnings
    
    * Fix 'from_over_into' clippy warnings
    
    * Push 'allow(module_inception)' down from lib.rs
    
    * Fix 'complex_type' clippy warning with type alias
    
    * Remove unused yet allowed clippy warnings
    
    * Fix 'new_without_default' clippy warnings for tests
---
 datafusion/src/catalog/catalog.rs                  | 12 +++++++
 datafusion/src/catalog/mod.rs                      |  1 +
 datafusion/src/catalog/schema.rs                   |  6 ++++
 datafusion/src/datasource/mod.rs                   |  1 +
 datafusion/src/datasource/object_store/mod.rs      |  6 ++++
 datafusion/src/execution/context.rs                | 42 +++++++++++++++-------
 datafusion/src/execution/options.rs                |  6 ++++
 datafusion/src/lib.rs                              |  9 -----
 datafusion/src/logical_plan/dfschema.rs            | 23 ++++++------
 .../src/optimizer/common_subexpr_eliminate.rs      |  6 ++++
 datafusion/src/optimizer/eliminate_limit.rs        |  1 +
 datafusion/src/optimizer/filter_push_down.rs       |  1 +
 datafusion/src/optimizer/limit_push_down.rs        |  1 +
 datafusion/src/optimizer/mod.rs                    |  1 +
 datafusion/src/optimizer/projection_push_down.rs   |  1 +
 datafusion/src/optimizer/simplify_expressions.rs   |  1 +
 .../src/optimizer/single_distinct_to_groupby.rs    |  1 +
 .../src/physical_optimizer/aggregate_statistics.rs |  1 +
 .../src/physical_optimizer/coalesce_batches.rs     |  1 +
 .../physical_optimizer/hash_build_probe_order.rs   |  1 +
 datafusion/src/physical_optimizer/merge_exec.rs    |  1 +
 datafusion/src/physical_optimizer/repartition.rs   |  1 +
 datafusion/src/physical_plan/expressions/case.rs   | 10 +++---
 datafusion/src/physical_plan/metrics/value.rs      | 18 ++++++++++
 datafusion/src/test/exec.rs                        |  7 ++++
 datafusion/src/test/variable.rs                    |  2 ++
 26 files changed, 125 insertions(+), 36 deletions(-)

diff --git a/datafusion/src/catalog/catalog.rs 
b/datafusion/src/catalog/catalog.rs
index 30fea1f..7dbfa5a 100644
--- a/datafusion/src/catalog/catalog.rs
+++ b/datafusion/src/catalog/catalog.rs
@@ -59,6 +59,12 @@ impl MemoryCatalogList {
     }
 }
 
+impl Default for MemoryCatalogList {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl CatalogList for MemoryCatalogList {
     fn as_any(&self) -> &dyn Any {
         self
@@ -84,6 +90,12 @@ impl CatalogList for MemoryCatalogList {
     }
 }
 
+impl Default for MemoryCatalogProvider {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 /// Represents a catalog, comprising a number of named schemas.
 pub trait CatalogProvider: Sync + Send {
     /// Returns the catalog provider as [`Any`](std::any::Any)
diff --git a/datafusion/src/catalog/mod.rs b/datafusion/src/catalog/mod.rs
index 10591f0..478cdef 100644
--- a/datafusion/src/catalog/mod.rs
+++ b/datafusion/src/catalog/mod.rs
@@ -18,6 +18,7 @@
 //! This module contains interfaces and default implementations
 //! of table namespacing concepts, including catalogs and schemas.
 
+#![allow(clippy::module_inception)]
 pub mod catalog;
 pub mod information_schema;
 pub mod schema;
diff --git a/datafusion/src/catalog/schema.rs b/datafusion/src/catalog/schema.rs
index cf754f6..08707ea 100644
--- a/datafusion/src/catalog/schema.rs
+++ b/datafusion/src/catalog/schema.rs
@@ -79,6 +79,12 @@ impl MemorySchemaProvider {
     }
 }
 
+impl Default for MemorySchemaProvider {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl SchemaProvider for MemorySchemaProvider {
     fn as_any(&self) -> &dyn Any {
         self
diff --git a/datafusion/src/datasource/mod.rs b/datafusion/src/datasource/mod.rs
index 9f4f77f..6e119f0 100644
--- a/datafusion/src/datasource/mod.rs
+++ b/datafusion/src/datasource/mod.rs
@@ -17,6 +17,7 @@
 
 //! DataFusion data sources
 
+#![allow(clippy::module_inception)]
 pub mod datasource;
 pub mod empty;
 pub mod file_format;
diff --git a/datafusion/src/datasource/object_store/mod.rs 
b/datafusion/src/datasource/object_store/mod.rs
index 59e1841..aece82a 100644
--- a/datafusion/src/datasource/object_store/mod.rs
+++ b/datafusion/src/datasource/object_store/mod.rs
@@ -186,6 +186,12 @@ impl fmt::Debug for ObjectStoreRegistry {
     }
 }
 
+impl Default for ObjectStoreRegistry {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl ObjectStoreRegistry {
     /// Create the registry that object stores can be registered into.
     /// ['LocalFileSystem'] store is registered in by default to support read 
local files natively.
diff --git a/datafusion/src/execution/context.rs 
b/datafusion/src/execution/context.rs
index 8c3df46..944284b 100644
--- a/datafusion/src/execution/context.rs
+++ b/datafusion/src/execution/context.rs
@@ -141,6 +141,12 @@ pub struct ExecutionContext {
     pub state: Arc<Mutex<ExecutionContextState>>,
 }
 
+impl Default for ExecutionContext {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl ExecutionContext {
     /// Creates a new execution context using a default configuration.
     pub fn new() -> Self {
@@ -1049,6 +1055,27 @@ pub struct ExecutionProps {
     pub(crate) query_execution_start_time: DateTime<Utc>,
 }
 
+impl Default for ExecutionProps {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl ExecutionProps {
+    /// Creates a new execution props
+    pub fn new() -> Self {
+        ExecutionProps {
+            query_execution_start_time: chrono::Utc::now(),
+        }
+    }
+
+    /// Marks the execution of query started timestamp
+    pub fn start_execution(&mut self) -> &Self {
+        self.query_execution_start_time = chrono::Utc::now();
+        &*self
+    }
+}
+
 /// Execution context for registering data sources and executing queries
 #[derive(Clone)]
 pub struct ExecutionContextState {
@@ -1068,18 +1095,9 @@ pub struct ExecutionContextState {
     pub object_store_registry: Arc<ObjectStoreRegistry>,
 }
 
-impl ExecutionProps {
-    /// Creates a new execution props
-    pub fn new() -> Self {
-        ExecutionProps {
-            query_execution_start_time: chrono::Utc::now(),
-        }
-    }
-
-    /// Marks the execution of query started timestamp
-    pub fn start_execution(&mut self) -> &Self {
-        self.query_execution_start_time = chrono::Utc::now();
-        &*self
+impl Default for ExecutionContextState {
+    fn default() -> Self {
+        Self::new()
     }
 }
 
diff --git a/datafusion/src/execution/options.rs 
b/datafusion/src/execution/options.rs
index c6b5ff6..219e2fd 100644
--- a/datafusion/src/execution/options.rs
+++ b/datafusion/src/execution/options.rs
@@ -46,6 +46,12 @@ pub struct CsvReadOptions<'a> {
     pub file_extension: &'a str,
 }
 
+impl<'a> Default for CsvReadOptions<'a> {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl<'a> CsvReadOptions<'a> {
     /// Create a CSV read option with default presets
     pub fn new() -> Self {
diff --git a/datafusion/src/lib.rs b/datafusion/src/lib.rs
index df9efaf..26d55bc 100644
--- a/datafusion/src/lib.rs
+++ b/datafusion/src/lib.rs
@@ -15,15 +15,6 @@
 // specific language governing permissions and limitations
 // under the License.
 #![warn(missing_docs, clippy::needless_borrow)]
-// Clippy lints, some should be disabled incrementally
-#![allow(
-    clippy::float_cmp,
-    clippy::from_over_into,
-    clippy::module_inception,
-    clippy::new_without_default,
-    clippy::type_complexity,
-    clippy::upper_case_acronyms
-)]
 
 //! [DataFusion](https://github.com/apache/arrow-datafusion)
 //! is an extensible query execution framework that uses
diff --git a/datafusion/src/logical_plan/dfschema.rs 
b/datafusion/src/logical_plan/dfschema.rs
index 31143c4..9db720e 100644
--- a/datafusion/src/logical_plan/dfschema.rs
+++ b/datafusion/src/logical_plan/dfschema.rs
@@ -297,11 +297,12 @@ impl DFSchema {
     }
 }
 
-impl Into<Schema> for DFSchema {
-    /// Convert a schema into a DFSchema
-    fn into(self) -> Schema {
+impl From<DFSchema> for Schema {
+    /// Convert DFSchema into a Schema
+    fn from(df_schema: DFSchema) -> Self {
         Schema::new(
-            self.fields
+            df_schema
+                .fields
                 .into_iter()
                 .map(|f| {
                     if f.qualifier().is_some() {
@@ -319,10 +320,10 @@ impl Into<Schema> for DFSchema {
     }
 }
 
-impl Into<Schema> for &DFSchema {
-    /// Convert a schema into a DFSchema
-    fn into(self) -> Schema {
-        Schema::new(self.fields.iter().map(|f| f.field.clone()).collect())
+impl From<&DFSchema> for Schema {
+    /// Convert DFSchema reference into a Schema
+    fn from(df_schema: &DFSchema) -> Self {
+        Schema::new(df_schema.fields.iter().map(|f| f.field.clone()).collect())
     }
 }
 
@@ -340,9 +341,9 @@ impl TryFrom<Schema> for DFSchema {
     }
 }
 
-impl Into<SchemaRef> for DFSchema {
-    fn into(self) -> SchemaRef {
-        SchemaRef::new(self.into())
+impl From<DFSchema> for SchemaRef {
+    fn from(df_schema: DFSchema) -> Self {
+        SchemaRef::new(df_schema.into())
     }
 }
 
diff --git a/datafusion/src/optimizer/common_subexpr_eliminate.rs 
b/datafusion/src/optimizer/common_subexpr_eliminate.rs
index 233d112..9470734 100644
--- a/datafusion/src/optimizer/common_subexpr_eliminate.rs
+++ b/datafusion/src/optimizer/common_subexpr_eliminate.rs
@@ -69,6 +69,12 @@ impl OptimizerRule for CommonSubexprEliminate {
     }
 }
 
+impl Default for CommonSubexprEliminate {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl CommonSubexprEliminate {
     #[allow(missing_docs)]
     pub fn new() -> Self {
diff --git a/datafusion/src/optimizer/eliminate_limit.rs 
b/datafusion/src/optimizer/eliminate_limit.rs
index 1f74ae2..c1fc206 100644
--- a/datafusion/src/optimizer/eliminate_limit.rs
+++ b/datafusion/src/optimizer/eliminate_limit.rs
@@ -25,6 +25,7 @@ use super::utils;
 use crate::execution::context::ExecutionProps;
 
 /// Optimization rule that replaces LIMIT 0 with a 
[LogicalPlan::EmptyRelation]
+#[derive(Default)]
 pub struct EliminateLimit;
 
 impl EliminateLimit {
diff --git a/datafusion/src/optimizer/filter_push_down.rs 
b/datafusion/src/optimizer/filter_push_down.rs
index c55a5cd..c721712 100644
--- a/datafusion/src/optimizer/filter_push_down.rs
+++ b/datafusion/src/optimizer/filter_push_down.rs
@@ -54,6 +54,7 @@ use std::{
 /// and when it reaches a node that does not commute with it, it adds the 
filter to that place.
 /// When it passes through a projection, it re-writes the filter's expression 
taking into accoun that projection.
 /// When multiple filters would have been written, it `AND` their expressions 
into a single expression.
+#[derive(Default)]
 pub struct FilterPushDown {}
 
 #[derive(Debug, Clone, Default)]
diff --git a/datafusion/src/optimizer/limit_push_down.rs 
b/datafusion/src/optimizer/limit_push_down.rs
index 15d5093..4fa6e27 100644
--- a/datafusion/src/optimizer/limit_push_down.rs
+++ b/datafusion/src/optimizer/limit_push_down.rs
@@ -28,6 +28,7 @@ use std::sync::Arc;
 
 /// Optimization rule that tries to push down LIMIT n
 /// where applicable to reduce the amount of scanned / processed data
+#[derive(Default)]
 pub struct LimitPushDown {}
 
 impl LimitPushDown {
diff --git a/datafusion/src/optimizer/mod.rs b/datafusion/src/optimizer/mod.rs
index c5cab97..984cbee 100644
--- a/datafusion/src/optimizer/mod.rs
+++ b/datafusion/src/optimizer/mod.rs
@@ -18,6 +18,7 @@
 //! This module contains a query optimizer that operates against a logical 
plan and applies
 //! some simple rules to a logical plan, such as "Projection Push Down" and 
"Type Coercion".
 
+#![allow(clippy::module_inception)]
 pub mod common_subexpr_eliminate;
 pub mod eliminate_limit;
 pub mod filter_push_down;
diff --git a/datafusion/src/optimizer/projection_push_down.rs 
b/datafusion/src/optimizer/projection_push_down.rs
index f92ab65..fb45e98 100644
--- a/datafusion/src/optimizer/projection_push_down.rs
+++ b/datafusion/src/optimizer/projection_push_down.rs
@@ -39,6 +39,7 @@ use std::{
 
 /// Optimizer that removes unused projections and aggregations from plans
 /// This reduces both scans and
+#[derive(Default)]
 pub struct ProjectionPushDown {}
 
 impl OptimizerRule for ProjectionPushDown {
diff --git a/datafusion/src/optimizer/simplify_expressions.rs 
b/datafusion/src/optimizer/simplify_expressions.rs
index 7445c90..653c613 100644
--- a/datafusion/src/optimizer/simplify_expressions.rs
+++ b/datafusion/src/optimizer/simplify_expressions.rs
@@ -43,6 +43,7 @@ use crate::{error::Result, logical_plan::Operator};
 /// is optimized to
 /// `Filter: b > 2`
 ///
+#[derive(Default)]
 pub struct SimplifyExpressions {}
 
 /// returns true if `needle` is found in a chain of search_op
diff --git a/datafusion/src/optimizer/single_distinct_to_groupby.rs 
b/datafusion/src/optimizer/single_distinct_to_groupby.rs
index 9bddec9..02a24e2 100644
--- a/datafusion/src/optimizer/single_distinct_to_groupby.rs
+++ b/datafusion/src/optimizer/single_distinct_to_groupby.rs
@@ -40,6 +40,7 @@ use std::sync::Arc;
 ///    )
 ///    GROUP BY k
 ///  ```
+#[derive(Default)]
 pub struct SingleDistinctToGroupBy {}
 
 const SINGLE_DISTINCT_ALIAS: &str = "alias1";
diff --git a/datafusion/src/physical_optimizer/aggregate_statistics.rs 
b/datafusion/src/physical_optimizer/aggregate_statistics.rs
index 2732777..515f732 100644
--- a/datafusion/src/physical_optimizer/aggregate_statistics.rs
+++ b/datafusion/src/physical_optimizer/aggregate_statistics.rs
@@ -34,6 +34,7 @@ use super::utils::optimize_children;
 use crate::error::Result;
 
 /// Optimizer that uses available statistics for aggregate functions
+#[derive(Default)]
 pub struct AggregateStatistics {}
 
 impl AggregateStatistics {
diff --git a/datafusion/src/physical_optimizer/coalesce_batches.rs 
b/datafusion/src/physical_optimizer/coalesce_batches.rs
index 9af8911..14616a7 100644
--- a/datafusion/src/physical_optimizer/coalesce_batches.rs
+++ b/datafusion/src/physical_optimizer/coalesce_batches.rs
@@ -29,6 +29,7 @@ use crate::{
 use std::sync::Arc;
 
 /// Optimizer that introduces CoalesceBatchesExec to avoid overhead with small 
batches
+#[derive(Default)]
 pub struct CoalesceBatches {}
 
 impl CoalesceBatches {
diff --git a/datafusion/src/physical_optimizer/hash_build_probe_order.rs 
b/datafusion/src/physical_optimizer/hash_build_probe_order.rs
index 0d1c39f..1184751 100644
--- a/datafusion/src/physical_optimizer/hash_build_probe_order.rs
+++ b/datafusion/src/physical_optimizer/hash_build_probe_order.rs
@@ -38,6 +38,7 @@ use crate::error::Result;
 /// is the smallest.
 /// If the information is not available, the order stays the same,
 /// so that it could be optimized manually in a query.
+#[derive(Default)]
 pub struct HashBuildProbeOrder {}
 
 impl HashBuildProbeOrder {
diff --git a/datafusion/src/physical_optimizer/merge_exec.rs 
b/datafusion/src/physical_optimizer/merge_exec.rs
index 0127313..58823a6 100644
--- a/datafusion/src/physical_optimizer/merge_exec.rs
+++ b/datafusion/src/physical_optimizer/merge_exec.rs
@@ -26,6 +26,7 @@ use crate::{
 use std::sync::Arc;
 
 /// Introduces CoalescePartitionsExec
+#[derive(Default)]
 pub struct AddCoalescePartitionsExec {}
 
 impl AddCoalescePartitionsExec {
diff --git a/datafusion/src/physical_optimizer/repartition.rs 
b/datafusion/src/physical_optimizer/repartition.rs
index 8ac9dad..26f77ef 100644
--- a/datafusion/src/physical_optimizer/repartition.rs
+++ b/datafusion/src/physical_optimizer/repartition.rs
@@ -26,6 +26,7 @@ use crate::physical_plan::{Distribution, Partitioning::*};
 use crate::{error::Result, execution::context::ExecutionConfig};
 
 /// Optimizer that introduces repartition to introduce more parallelism in the 
plan
+#[derive(Default)]
 pub struct Repartition {}
 
 impl Repartition {
diff --git a/datafusion/src/physical_plan/expressions/case.rs 
b/datafusion/src/physical_plan/expressions/case.rs
index f577d6c..551d87a 100644
--- a/datafusion/src/physical_plan/expressions/case.rs
+++ b/datafusion/src/physical_plan/expressions/case.rs
@@ -24,6 +24,8 @@ use arrow::compute::{eq, eq_utf8};
 use arrow::datatypes::{DataType, Schema};
 use arrow::record_batch::RecordBatch;
 
+type WhenThen = (Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>);
+
 /// The CASE expression is similar to a series of nested if/else and there are 
two forms that
 /// can be used. The first form consists of a series of boolean "when" 
expressions with
 /// corresponding "then" expressions, and an optional "else" expression.
@@ -46,7 +48,7 @@ pub struct CaseExpr {
     /// Optional base expression that can be compared to literal values in the 
"when" expressions
     expr: Option<Arc<dyn PhysicalExpr>>,
     /// One or more when/then expressions
-    when_then_expr: Vec<(Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>)>,
+    when_then_expr: Vec<WhenThen>,
     /// Optional "else" expression
     else_expr: Option<Arc<dyn PhysicalExpr>>,
 }
@@ -71,7 +73,7 @@ impl CaseExpr {
     /// Create a new CASE WHEN expression
     pub fn try_new(
         expr: Option<Arc<dyn PhysicalExpr>>,
-        when_then_expr: &[(Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>)],
+        when_then_expr: &[WhenThen],
         else_expr: Option<Arc<dyn PhysicalExpr>>,
     ) -> Result<Self> {
         if when_then_expr.is_empty() {
@@ -93,7 +95,7 @@ impl CaseExpr {
     }
 
     /// One or more when/then expressions
-    pub fn when_then_expr(&self) -> &[(Arc<dyn PhysicalExpr>, Arc<dyn 
PhysicalExpr>)] {
+    pub fn when_then_expr(&self) -> &[WhenThen] {
         &self.when_then_expr
     }
 
@@ -437,7 +439,7 @@ impl PhysicalExpr for CaseExpr {
 /// Create a CASE expression
 pub fn case(
     expr: Option<Arc<dyn PhysicalExpr>>,
-    when_thens: &[(Arc<dyn PhysicalExpr>, Arc<dyn PhysicalExpr>)],
+    when_thens: &[WhenThen],
     else_expr: Option<Arc<dyn PhysicalExpr>>,
 ) -> Result<Arc<dyn PhysicalExpr>> {
     Ok(Arc::new(CaseExpr::try_new(expr, when_thens, else_expr)?))
diff --git a/datafusion/src/physical_plan/metrics/value.rs 
b/datafusion/src/physical_plan/metrics/value.rs
index 6f63583..1caf13e 100644
--- a/datafusion/src/physical_plan/metrics/value.rs
+++ b/datafusion/src/physical_plan/metrics/value.rs
@@ -50,6 +50,12 @@ impl Display for Count {
     }
 }
 
+impl Default for Count {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl Count {
     /// create a new counter
     pub fn new() -> Self {
@@ -78,6 +84,12 @@ pub struct Time {
     nanos: Arc<AtomicUsize>,
 }
 
+impl Default for Time {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl PartialEq for Time {
     fn eq(&self, other: &Self) -> bool {
         self.value().eq(&other.value())
@@ -140,6 +152,12 @@ pub struct Timestamp {
     timestamp: Arc<Mutex<Option<DateTime<Utc>>>>,
 }
 
+impl Default for Timestamp {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl Timestamp {
     /// Create a new timestamp and sets its value to 0
     pub fn new() -> Self {
diff --git a/datafusion/src/test/exec.rs b/datafusion/src/test/exec.rs
index 4a9534f..8351c9b 100644
--- a/datafusion/src/test/exec.rs
+++ b/datafusion/src/test/exec.rs
@@ -342,6 +342,13 @@ impl ExecutionPlan for BarrierExec {
 pub struct ErrorExec {
     schema: SchemaRef,
 }
+
+impl Default for ErrorExec {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
 impl ErrorExec {
     pub fn new() -> Self {
         let schema = Arc::new(Schema::new(vec![Field::new(
diff --git a/datafusion/src/test/variable.rs b/datafusion/src/test/variable.rs
index 47d1370..fcde0e9 100644
--- a/datafusion/src/test/variable.rs
+++ b/datafusion/src/test/variable.rs
@@ -22,6 +22,7 @@ use crate::scalar::ScalarValue;
 use crate::variable::VarProvider;
 
 /// System variable
+#[derive(Default)]
 pub struct SystemVar {}
 
 impl SystemVar {
@@ -40,6 +41,7 @@ impl VarProvider for SystemVar {
 }
 
 /// user defined variable
+#[derive(Default)]
 pub struct UserDefinedVar {}
 
 impl UserDefinedVar {

Reply via email to