liukun4515 commented on code in PR #3380:
URL: https://github.com/apache/arrow-datafusion/pull/3380#discussion_r970197922


##########
datafusion/core/src/physical_plan/file_format/row_filter.rs:
##########
@@ -0,0 +1,400 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{Array, BooleanArray};
+use arrow::datatypes::{DataType, Field, Schema};
+use arrow::error::{ArrowError, Result as ArrowResult};
+use arrow::record_batch::RecordBatch;
+use datafusion_common::{Column, Result, ScalarValue, ToDFSchema};
+use datafusion_expr::expr_rewriter::{ExprRewritable, ExprRewriter, RewriteRecursion};
+
+use datafusion_expr::{uncombine_filter, Expr};
+use datafusion_physical_expr::execution_props::ExecutionProps;
+use datafusion_physical_expr::{create_physical_expr, PhysicalExpr};
+use parquet::arrow::arrow_reader::{ArrowPredicate, RowFilter};
+use parquet::arrow::ProjectionMask;
+use parquet::file::metadata::ParquetMetaData;
+use std::sync::Arc;
+
+/// This module contains utilities for enabling the pushdown of DataFusion filter predicates (which
+/// can be any DataFusion `Expr` that evaluates to a `BooleanArray`) to the parquet decoder level in `arrow-rs`.
+/// DataFusion will use a `ParquetRecordBatchStream` to read data from parquet into arrow `RecordBatch`es.
+/// When constructing the `ParquetRecordBatchStream` you can provide a `RowFilter`, which is itself just a vector
+/// of `Box<dyn ArrowPredicate>`. During decoding, the predicates are evaluated to generate a mask which is used
+/// to avoid decoding rows in projected columns which are not selected, which can significantly reduce the amount
+/// of compute required for decoding.
+///
+/// Since the predicates are applied serially in the order defined in the `RowFilter`, the optimal ordering
+/// will depend on the exact filters. The best filters to execute first have two properties:
+///     1. They are relatively inexpensive to evaluate (e.g. they read column chunks which are relatively small)
+///     2. They filter a lot of rows, reducing the amount of decoding required for subsequent filters and projected columns
+///
+/// Given the metadata exposed by parquet, the selectivity of filters is not easy to estimate, so the heuristics we use here primarily
+/// focus on the evaluation cost.
+///
+/// The basic algorithm for constructing the `RowFilter` is as follows:
+///     1. Recursively break conjunctions into separate predicates. An expression like `a = 1 AND (b = 2 AND c = 3)` would be
+///        separated into the expressions `a = 1`, `b = 2`, and `c = 3`.
+///     2. Determine whether each predicate is suitable as an `ArrowPredicate`. As long as the predicate does not reference any projected columns
+///        or columns with non-primitive types, it is considered suitable.
+///     3. Determine, for each predicate, the total compressed size of all columns required to evaluate the predicate.
+///     4. Determine, for each predicate, whether all columns required to evaluate the expression are sorted.
+///     5. Re-order the predicates by total size (from step 3).
+///     6. Partition the predicates according to whether they are sorted (from step 4).
+///     7. "Compile" each predicate `Expr` to a `DatafusionArrowPredicate`.
+///     8. Build the `RowFilter` with the sorted predicates followed by the unsorted predicates. Within each partition,
+///        the predicates will still be sorted by size.
+
+/// A predicate which can be passed to `ParquetRecordBatchStream` to perform row-level
+/// filtering during parquet decoding.
+#[derive(Debug)]
+pub(crate) struct DatafusionArrowPredicate {
+    physical_expr: Arc<dyn PhysicalExpr>,
+    projection: ProjectionMask,
+}
+
+impl DatafusionArrowPredicate {
+    pub fn try_new(
+        candidate: FilterCandidate,
+        schema: &Schema,
+        metadata: &ParquetMetaData,
+    ) -> Result<Self> {
+        let props = ExecutionProps::default();
+
+        let schema = schema.project(&candidate.projection)?;
+        let df_schema = schema.clone().to_dfschema()?;
+
+        let physical_expr =
+            create_physical_expr(&candidate.expr, &df_schema, &schema, &props)?;
+
+        Ok(Self {
+            physical_expr,
+            projection: ProjectionMask::roots(
+                metadata.file_metadata().schema_descr(),
+                candidate.projection,
+            ),
+        })
+    }
+}
+
+impl ArrowPredicate for DatafusionArrowPredicate {
+    fn projection(&self) -> &ProjectionMask {
+        &self.projection
+    }
+
+    fn evaluate(&mut self, batch: RecordBatch) -> ArrowResult<BooleanArray> {
+        match self
+            .physical_expr
+            .evaluate(&batch)
+            .map(|v| v.into_array(batch.num_rows()))
+        {
+            Ok(array) => {
+                if let Some(mask) = array.as_any().downcast_ref::<BooleanArray>() {
+                    Ok(BooleanArray::from(mask.data().clone()))
+                } else {
+                    Err(ArrowError::ComputeError(
+                        "Unexpected result of predicate evaluation, expected 
BooleanArray".to_owned(),
+                    ))
+                }
+            }
+            Err(e) => Err(ArrowError::ComputeError(format!(
+                "Error evaluating filter predicate: {:?}",
+                e
+            ))),
+        }
+    }
+}
+
+/// A candidate expression for creating a `RowFilter` contains the
+/// expression as well as data to estimate the cost of evaluating
+/// the resulting expression.
+pub(crate) struct FilterCandidate {
+    expr: Expr,
+    required_bytes: usize,
+    can_use_index: bool,
+    projection: Vec<usize>,
+}
+
+/// Helper to build a `FilterCandidate`. This will do several things
+/// 1. Determine the columns required to evaluate the expression
+/// 2. Calculate data required to estimate the cost of evaluating the filter
+/// 3. Rewrite column expressions in the predicate which reference columns not in the particular file schema.
+///    This is relevant in the case where we have determined the table schema by merging all individual file schemas
+///    and any given file may or may not contain all columns in the merged schema. If a particular column is not present,
+///    we replace the column expression with a literal expression that produces a null value.
+struct FilterCandidateBuilder<'a> {
+    expr: Expr,
+    file_schema: &'a Schema,
+    table_schema: &'a Schema,
+    required_column_indices: Vec<usize>,
+    non_primitive_columns: bool,
+    projected_columns: bool,
+}
+
+impl<'a> FilterCandidateBuilder<'a> {
+    pub fn new(expr: Expr, file_schema: &'a Schema, table_schema: &'a Schema) -> Self {
+        Self {
+            expr,
+            file_schema,
+            table_schema,
+            required_column_indices: vec![],
+            non_primitive_columns: false,
+            projected_columns: false,
+        }
+    }
+
+    pub fn build(
+        mut self,
+        metadata: &ParquetMetaData,
+    ) -> Result<Option<FilterCandidate>> {
+        let expr = self.expr.clone();
+        let expr = expr.rewrite(&mut self)?;
+
+        if self.non_primitive_columns || self.projected_columns {
+            Ok(None)
+        } else {
+            let required_bytes =
+                size_of_columns(&self.required_column_indices, metadata)?;
+            let can_use_index = columns_sorted(&self.required_column_indices, metadata)?;
+
+            Ok(Some(FilterCandidate {
+                expr,
+                required_bytes,
+                can_use_index,
+                projection: self.required_column_indices,
+            }))
+        }
+    }
+}
+
+impl<'a> ExprRewriter for FilterCandidateBuilder<'a> {
+    fn pre_visit(&mut self, expr: &Expr) -> Result<RewriteRecursion> {
+        if let Expr::Column(column) = expr {
+            if let Ok(idx) = self.file_schema.index_of(&column.name) {
+                self.required_column_indices.push(idx);
+
+                if !is_primitive_field(self.file_schema.field(idx)) {
+                    self.non_primitive_columns = true;
+                }
+            } else if self.table_schema.index_of(&column.name).is_err() {
+                // If the column does not exist in the (un-projected) table schema then
+                // it must be a projected column.
+                self.projected_columns = true;
+            }
+        }
+        Ok(RewriteRecursion::Continue)
+    }
+
+    fn mutate(&mut self, expr: Expr) -> Result<Expr> {

Review Comment:
   we need to get the data type from the table schema.
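   As a hedged sketch of one way to do that (the actual PR code may differ): when the column is missing from the file schema, look the field up in the table schema and rewrite the column reference to a typed null literal. `field_with_name` and `ScalarValue::try_from(&DataType)` are the standard arrow/DataFusion helpers; everything else mirrors the builder fields above.

   ```rust
   fn mutate(&mut self, expr: Expr) -> Result<Expr> {
       if let Expr::Column(column) = &expr {
           if self.file_schema.field_with_name(&column.name).is_err() {
               // Column is not present in this file: take the data type from the
               // table schema so the null literal we substitute has the right type.
               if let Ok(field) = self.table_schema.field_with_name(&column.name) {
                   return Ok(Expr::Literal(ScalarValue::try_from(field.data_type())?));
               }
           }
       }
       Ok(expr)
   }
   ```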


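   For context on how the module's ordering heuristic (steps 5, 6 and 8 in the docs above) fits together, here is a minimal sketch using a simplified stand-in for the `FilterCandidate` struct from the diff; the actual implementation in the PR may differ:

   ```rust
   // Simplified stand-in for the FilterCandidate struct above (sketch only).
   struct FilterCandidate {
       required_bytes: usize, // total compressed size of the columns the predicate reads
       can_use_index: bool,   // whether all of those columns are sorted
   }

   /// Cheapest predicates first, with candidates whose columns are sorted placed
   /// ahead of the rest; each partition stays sorted by size (stable sort + partition).
   fn order_candidates(mut candidates: Vec<FilterCandidate>) -> Vec<FilterCandidate> {
       candidates.sort_by_key(|c| c.required_bytes);
       let (indexed, other): (Vec<_>, Vec<_>) =
           candidates.into_iter().partition(|c| c.can_use_index);
       indexed.into_iter().chain(other).collect()
   }
   ```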
