Kouhei Sutou created ARROW-5044:
-----------------------------------

             Summary: [Release][Rust] Format error in verification script
                 Key: ARROW-5044
                 URL: https://issues.apache.org/jira/browse/ARROW-5044
             Project: Apache Arrow
          Issue Type: Bug
          Components: Packaging, Rust
            Reporter: Kouhei Sutou
             Fix For: 0.13.0
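
Running the source release verification script against apache-arrow-0.13.0 fails at the Rust formatting check: cargo fmt --all -- --check reports the diffs below, so the archived source is not rustfmt-clean.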


{noformat}
+ cargo fmt --all -- --check
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/arrow/src/csv/writer.rs at line 53:
 //! let batch = RecordBatch::try_new(
 //!     Arc::new(schema),
 //!     vec![Arc::new(c1), Arc::new(c2), Arc::new(c3), Arc::new(c4)],
-//! ).unwrap();
+//! )
+//! .unwrap();
 //!
 //! let file = get_temp_file("out.csv", &[]);
 //!
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/datasource/datasource.rs at line 24:
 
 use crate::error::Result;
 
-/// Returned by implementors of `Table#scan`, this `RecordBatchIterator` is wrapped with an `Arc`
-/// and `Mutex` so that it can be shared across threads as it is used.
+/// Returned by implementors of `Table#scan`, this `RecordBatchIterator` is wrapped with
+/// an `Arc` and `Mutex` so that it can be shared across threads as it is used.
 pub type ScanResult = Arc<Mutex<RecordBatchIterator>>;
 
 /// Source table
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/datasource/datasource.rs at line 33:
     /// Get a reference to the schema for this table
     fn schema(&self) -> &Arc<Schema>;
 
-    /// Perform a scan of a table and return a sequence of iterators over the data (one iterator per partition)
+    /// Perform a scan of a table and return a sequence of iterators over the data (one
+    /// iterator per partition)
     fn scan(
         &self,
         projection: &Option<Vec<usize>>,
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/datasource/memory.rs at line 16:
 // under the License.
 
 //! In-memory data source for presenting a Vec<RecordBatch> as a data source that can be
-//! queried by DataFusion. This allows data to be pre-loaded into memory and then repeatedly
-//! queried without incurring additional file I/O overhead.
+//! queried by DataFusion. This allows data to be pre-loaded into memory and then
+//! repeatedly queried without incurring additional file I/O overhead.
 
 use std::sync::{Arc, Mutex};
 
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/execution/context.rs at line 15:
 // specific language governing permissions and limitations
 // under the License.
 
-//! ExecutionContext contains methods for registering data sources and executing SQL queries
+//! ExecutionContext contains methods for registering data sources and executing SQL
+//! queries
 
 use std::cell::RefCell;
 use std::collections::HashMap;
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/execution/context.rs at line 139:
         Ok(plan)
     }
 
-    /// Execute a logical plan and produce a Relation (a schema-aware iterator over a series
-    /// of RecordBatch instances)
+    /// Execute a logical plan and produce a Relation (a schema-aware iterator over a
+    /// series of RecordBatch instances)
     pub fn execute(
         &mut self,
         plan: &LogicalPlan,
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/execution/filter.rs at line 15:
 // specific language governing permissions and limitations
 // under the License.
 
-//! Execution of a filter (predicate) relation. The SQL clause `WHERE expr` represents a filter.
+//! Execution of a filter (predicate) relation. The SQL clause `WHERE expr` represents a
+//! filter.
 
 use std::cell::RefCell;
 use std::rc::Rc;
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/execution/filter.rs at line 32:
 
 /// Implementation of a filter relation
 pub(super) struct FilterRelation {
-    /// The schema for the filter relation. This is always the same as the schema of the input relation.
+    /// The schema for the filter relation. This is always the same as the schema of the
+    /// input relation.
     schema: Arc<Schema>,
     /// Relation that is  being filtered
     input: Rc<RefCell<Relation>>,
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/execution/limit.rs at line 33:
 pub(super) struct LimitRelation {
     /// The relation which the limit is being applied to
     input: Rc<RefCell<Relation>>,
-    /// The schema for the limit relation, which is always the same as the schema of the input relation
+    /// The schema for the limit relation, which is always the same as the schema of the
+    /// input relation
     schema: Arc<Schema>,
     /// The number of rows returned by this relation
     limit: usize,
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/execution/projection.rs at line 15:
 // specific language governing permissions and limitations
 // under the License.
 
-//! Defines the projection relation. A projection determines which columns or expressions are
-//! returned from a query. The SQL statement `SELECT a, b, a+b FROM t1` is an example of a
-//! projection on table `t1` where the expressions `a`, `b`, and `a+b` are the projection
-//! expressions.
+//! Defines the projection relation. A projection determines which columns or expressions
+//! are returned from a query. The SQL statement `SELECT a, b, a+b FROM t1` is an example
+//! of a projection on table `t1` where the expressions `a`, `b`, and `a+b` are the
+//! projection expressions.
 
 use std::cell::RefCell;
 use std::rc::Rc;
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/execution/relation.rs at line 16:
 // under the License.
 
 //! A relation is a representation of a set of tuples. A database table is a
-//! type of relation. During query execution, each operation on a relation (such as projection,
-//! selection, aggregation) results in a new relation.
+//! type of relation. During query execution, each operation on a relation (such as
+//! projection, selection, aggregation) results in a new relation.
 
 use std::sync::{Arc, Mutex};
 
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/optimizer/optimizer.rs at line 21:
 use crate::logicalplan::LogicalPlan;
 use std::sync::Arc;
 
-/// An optimizer rules performs a transformation on a logical plan to produce an optimized logical plan.
+/// An optimizer rules performs a transformation on a logical plan to produce an optimized
+/// logical plan.
 pub trait OptimizerRule {
     /// Perform optimizations on the plan
     fn optimize(&mut self, plan: &LogicalPlan) -> Result<Arc<LogicalPlan>>;
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/optimizer/projection_push_down.rs at line 142:
                 schema,
                 ..
             } => {
-                // once we reach the table scan, we can use the accumulated set of column indexes as
-                // the projection in the table scan
+                // once we reach the table scan, we can use the accumulated set of column
+                // indexes as the projection in the table scan
                 let mut projection: Vec<usize> = Vec::with_capacity(accum.len());
                 accum.iter().for_each(|i| projection.push(*i));
 
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/optimizer/projection_push_down.rs at line 158:
                 }
                 let projected_schema = Schema::new(projected_fields);
 
-                // now that the table scan is returning a different schema we need to create a
-                // mapping from the original column index to the new column index so that we
-                // can rewrite expressions as we walk back up the tree
+                // now that the table scan is returning a different schema we need to
+                // create a mapping from the original column index to the
+                // new column index so that we can rewrite expressions as
+                // we walk back up the tree
 
                 if mapping.len() != 0 {
                     return Err(ExecutionError::InternalError(
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/optimizer/type_coercion.rs at line 17:
 
 //! The type_coercion optimizer rule ensures that all binary operators are operating on
 //! compatible types by adding explicit cast operations to expressions. For example,
-//! the operation `c_float + c_int` would be rewritten as `c_float + CAST(c_int AS float)`.
-//! This keeps the runtime query execution code much simpler.
+//! the operation `c_float + c_int` would be rewritten as `c_float + CAST(c_int AS
+//! float)`. This keeps the runtime query execution code much simpler.
 
 use std::sync::Arc;
 
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/optimizer/utils.rs at line 24:
 use crate::error::{ExecutionError, Result};
 use crate::logicalplan::Expr;
 
-/// Recursively walk a list of expression trees, collecting the unique set of column indexes
-/// referenced in the expression
+/// Recursively walk a list of expression trees, collecting the unique set of column
+/// indexes referenced in the expression
 pub fn exprlist_to_column_indices(expr: &Vec<Expr>, accum: &mut HashSet<usize>) {
     expr.iter().for_each(|e| expr_to_column_indices(e, accum));
 }
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/src/table.rs at line 15:
 // specific language governing permissions and limitations
 // under the License.
 
-//! Table API for building a logical query plan. This is similar to the Table API in Ibis and
-//! the DataFrame API in Apache Spark
+//! Table API for building a logical query plan. This is similar to the Table API in Ibis
+//! and the DataFrame API in Apache Spark
 
 use crate::error::Result;
 use crate::logicalplan::LogicalPlan;
Diff in /tmp/arrow-0.13.0.tW4Dz/apache-arrow-0.13.0/rust/datafusion/tests/sql.rs at line 129:
     assert_eq!(expected, actual);
 }
 
-//TODO Uncomment the following test when ORDER BY is implemented to be able to test ORDER BY + LIMIT
+//TODO Uncomment the following test when ORDER BY is implemented to be able to test ORDER
+// BY + LIMIT
 /*
 #[test]
 fn csv_query_limit_with_order_by() {

{noformat}
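
All of the diffs above are pure rustfmt layout changes (mostly re-wrapped comment and doc-comment lines); none of them change behavior. This suggests the source was formatted with a different rustfmt version than the one the verification environment picks up, though that is an assumption, not verified. A minimal sketch of the fix, assuming the usual cargo workflow in a checkout of the arrow repository: apply the formatting in place and commit the result, after which the cargo fmt --all -- --check invocation in the verification script exits cleanly.

{noformat}
# Run from a checkout of the arrow repository; assumes the same rustfmt
# version that the verification script uses is installed.
cd rust
cargo fmt --all   # rewrites the files in place instead of only checking
git diff          # review the re-wrapped comments before committing
{noformat}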


