This is an automated email from the ASF dual-hosted git repository.

alamb pushed a commit to branch active_release
in repository https://gitbox.apache.org/repos/asf/arrow-rs.git


The following commit(s) were added to refs/heads/active_release by this push:
     new e80bb01  fix some clippy warnings (#896) (#930)
e80bb01 is described below

commit e80bb018450f13a30811ffd244c42917d8bf8a62
Author: Andrew Lamb <[email protected]>
AuthorDate: Tue Nov 9 07:24:34 2021 -0500

    fix some clippy warnings (#896) (#930)
    
    Co-authored-by: Jiayu Liu <[email protected]>
---
 arrow/src/error.rs           |  2 +-
 arrow/src/util/test_util.rs  |  2 +-
 parquet/src/data_type.rs     | 16 ++--------------
 parquet/src/record/reader.rs |  4 ++--
 parquet/src/schema/parser.rs |  2 +-
 parquet_derive/src/lib.rs    |  5 +----
 6 files changed, 8 insertions(+), 23 deletions(-)

diff --git a/arrow/src/error.rs b/arrow/src/error.rs
index 86896c0..ef7abbb 100644
--- a/arrow/src/error.rs
+++ b/arrow/src/error.rs
@@ -65,7 +65,7 @@ impl From<csv_crate::Error> for ArrowError {
             csv_crate::ErrorKind::Io(error) => ArrowError::CsvError(error.to_string()),
             csv_crate::ErrorKind::Utf8 { pos: _, err } => ArrowError::CsvError(format!(
                 "Encountered UTF-8 error while reading CSV file: {}",
-                err.to_string()
+                err
             )),
             csv_crate::ErrorKind::UnequalLengths {
                 expected_len, len, ..
diff --git a/arrow/src/util/test_util.rs b/arrow/src/util/test_util.rs
index 4b193f7..f02c8d0 100644
--- a/arrow/src/util/test_util.rs
+++ b/arrow/src/util/test_util.rs
@@ -124,7 +124,7 @@ fn get_data_dir(udf_env: &str, submodule_data: &str) -> Result<PathBuf, Box<dyn
             } else {
                 return Err(format!(
                     "the data dir `{}` defined by env {} not found",
-                    pb.display().to_string(),
+                    pb.display(),
                     udf_env
                 )
                 .into());
diff --git a/parquet/src/data_type.rs b/parquet/src/data_type.rs
index 8c64e86..6f3468a 100644
--- a/parquet/src/data_type.rs
+++ b/parquet/src/data_type.rs
@@ -36,7 +36,7 @@ use crate::util::{
 
 /// Rust representation for logical type INT96, value is backed by an array of `u32`.
 /// The type only takes 12 bytes, without extra padding.
-#[derive(Clone, Debug, PartialOrd)]
+#[derive(Clone, Debug, PartialOrd, Default)]
 pub struct Int96 {
     value: Option<[u32; 3]>,
 }
@@ -75,12 +75,6 @@ impl Int96 {
     }
 }
 
-impl Default for Int96 {
-    fn default() -> Self {
-        Self { value: None }
-    }
-}
-
 impl PartialEq for Int96 {
     fn eq(&self, other: &Int96) -> bool {
         match (&self.value, &other.value) {
@@ -109,7 +103,7 @@ impl fmt::Display for Int96 {
 
 /// Rust representation for BYTE_ARRAY and FIXED_LEN_BYTE_ARRAY Parquet physical types.
 /// Value is backed by a byte buffer.
-#[derive(Clone)]
+#[derive(Clone, Default)]
 pub struct ByteArray {
     data: Option<ByteBufferPtr>,
 }
@@ -231,12 +225,6 @@ impl From<ByteBuffer> for ByteArray {
     }
 }
 
-impl Default for ByteArray {
-    fn default() -> Self {
-        ByteArray { data: None }
-    }
-}
-
 impl PartialEq for ByteArray {
     fn eq(&self, other: &ByteArray) -> bool {
         match (&self.data, &other.data) {
diff --git a/parquet/src/record/reader.rs b/parquet/src/record/reader.rs
index 8f901f5..c45e097 100644
--- a/parquet/src/record/reader.rs
+++ b/parquet/src/record/reader.rs
@@ -136,7 +136,7 @@ impl TreeBuilder {
                 .column_descr_ptr();
             let col_reader = row_group_reader.get_column_reader(orig_index).unwrap();
             let column = TripletIter::new(col_descr, col_reader, self.batch_size);
-            Reader::PrimitiveReader(field, column)
+            Reader::PrimitiveReader(field, Box::new(column))
         } else {
             match field.get_basic_info().converted_type() {
                 // List types
@@ -319,7 +319,7 @@ impl TreeBuilder {
 /// Reader tree for record assembly
 pub enum Reader {
     // Primitive reader with type information and triplet iterator
-    PrimitiveReader(TypePtr, TripletIter),
+    PrimitiveReader(TypePtr, Box<TripletIter>),
     // Optional reader with definition level of a parent and a reader
     OptionReader(i16, Box<Reader>),
     // Group (struct) reader with type information, definition level and list of child
diff --git a/parquet/src/schema/parser.rs b/parquet/src/schema/parser.rs
index ba1f566..3a840d2 100644
--- a/parquet/src/schema/parser.rs
+++ b/parquet/src/schema/parser.rs
@@ -77,7 +77,7 @@ impl<'a> Tokenizer<'a> {
     pub fn from_str(string: &'a str) -> Self {
         let vec = string
             .split_whitespace()
-            .flat_map(|t| Self::split_token(t))
+            .flat_map(Self::split_token)
             .collect();
         Tokenizer {
             tokens: vec,
diff --git a/parquet_derive/src/lib.rs b/parquet_derive/src/lib.rs
index 6d75150..6b5d298 100644
--- a/parquet_derive/src/lib.rs
+++ b/parquet_derive/src/lib.rs
@@ -85,10 +85,7 @@ pub fn parquet_record_writer(input: proc_macro::TokenStream) -> proc_macro::Toke
         Data::Union(_) => unimplemented!("Union currently is not supported"),
     };
 
-    let field_infos: Vec<_> = fields
-        .iter()
-        .map(|f: &syn::Field| parquet_field::Field::from(f))
-        .collect();
+    let field_infos: Vec<_> = fields.iter().map(parquet_field::Field::from).collect();
 
     let writer_snippets: Vec<proc_macro2::TokenStream> =
         field_infos.iter().map(|x| x.writer_snippet()).collect();

Reply via email to