not-my-profile commented on code in PR #7425:
URL: https://github.com/apache/arrow-datafusion/pull/7425#discussion_r1317024608


##########
datafusion/core/src/datasource/physical_plan/finder.rs:
##########
@@ -0,0 +1,114 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! [`PartitionedFileFinder`] to scan an [`ExecutionPlan`] for input
+//! partitioned file sources.
+
+use std::sync::Arc;
+
+use datafusion_common::{
+    tree_node::{TreeNode, VisitRecursion},
+    Result,
+};
+
+use crate::{datasource::listing::PartitionedFile, physical_plan::ExecutionPlan};
+
+use super::{AvroExec, CsvExec, NdJsonExec, ParquetExec};
+
+pub type FinderFunction =
+    Box<dyn Fn(&dyn ExecutionPlan) -> Option<Vec<Vec<PartitionedFile>>>>;
+
+/// Get all of the [`PartitionedFile`]s to be scanned by an [`ExecutionPlan`]
+///
+/// This structure will find all `FileScanConfig`s in any built-in
+/// [`ExecutionPlan`] and allows finding user defined nodes as well
+pub struct PartitionedFileFinder {
+    custom_finder: FinderFunction,
+}
+
+impl Default for PartitionedFileFinder {
+    fn default() -> Self {
+        Self {
+            // the default custom finder finds nothing (always returns `None`)
+            custom_finder: Box::new(|_plan| None),
+        }
+    }
+}
+
+impl PartitionedFileFinder {
+    /// Create a new file finder
+    pub fn new() -> Self {
+        Default::default()
+    }
+
+    /// Get all [`PartitionedFile`]s that are scanned by an
+    /// [`ExecutionPlan`], by recursively checking all children
+    pub fn find(&self, plan: Arc<dyn ExecutionPlan>) -> Vec<Vec<Vec<PartitionedFile>>> {
+        let mut collector: Vec<Vec<Vec<PartitionedFile>>> = vec![];
+        plan.apply(&mut |plan| {
+            if let Some(files) = self.get_files(plan.as_ref()) {
+                collector.push(files);
+            }
+            Ok(VisitRecursion::Continue)
+        })
+        .expect("infallible");
+        collector
+    }
+
+    /// Provide a custom method to find `PartitionedFiles` for
+    /// `ExecutionPlans`
+    ///
+    /// Called on all [`ExecutionPlan`]s other than built-ins such as
+    /// [`ParquetExec`] and can be used to extract
+    /// [`PartitionedFile`]s from user defined nodes
+    pub fn with_finder<F>(mut self, custom_finder: F) -> Self
+    where
+        F: Fn(&dyn ExecutionPlan) -> Option<Vec<Vec<PartitionedFile>>> + 'static,
+    {
+        self.custom_finder = Box::new(custom_finder);
+        self
+    }
+
+    /// Return the [`PartitionedFile`]s scanned by this plan node, or
+    /// `None` if the plan does not scan files
+    fn get_files(&self, plan: &dyn ExecutionPlan) -> Option<Vec<Vec<PartitionedFile>>> {
+        let plan_any = plan.as_any();
+        if let Some(parquet_exec) = plan_any.downcast_ref::<ParquetExec>() {
+            Some(parquet_exec.base_config().file_groups.clone())
+        } else if let Some(avro_exec) = plan_any.downcast_ref::<AvroExec>() {
+            Some(avro_exec.base_config().file_groups.clone())
+        } else if let Some(json_exec) = plan_any.downcast_ref::<NdJsonExec>() {
+            Some(json_exec.base_config().file_groups.clone())
+        } else if let Some(csv_exec) = plan_any.downcast_ref::<CsvExec>() {
+            Some(csv_exec.base_config().file_groups.clone())
+        } else {
+            (self.custom_finder)(plan)
+        }

Review Comment:
   @crepererum I don't really see how an `Extractor<T>` type would improve
anything. It would just add a difficult-to-understand type for a very niche use
case to the public API. And I don't think it would improve the ergonomics at
all ... since the extraction logic may very well depend on the type being
extracted (and the API I suggest nicely lets you [reuse such
logic](https://github.com/apache/arrow-datafusion/blob/4a1c7abb1ace13a42a03375d4949a1260d4cd10b/datafusion/core/src/datasource/physical_plan/file_scan_config.rs#L81-L83)).
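
   For concreteness, here is roughly the usage I have in mind. It is a minimal
sketch (not compiled against this branch): `MyCustomScanExec` and its
`file_groups()` accessor are hypothetical stand-ins for a user defined node,
and the `PartitionedFileFinder` import path assumes the type ends up
re-exported from `datafusion::datasource::physical_plan`.

```rust
use std::sync::Arc;

use datafusion::datasource::listing::PartitionedFile;
// NOTE: assumed re-export location for the new finder added in this PR
use datafusion::datasource::physical_plan::PartitionedFileFinder;
use datafusion::physical_plan::ExecutionPlan;
// NOTE: hypothetical user defined scan node with a `file_groups()` accessor
// returning `&[Vec<PartitionedFile>]`; it stands in for any extension node
use my_extension::MyCustomScanExec;

/// Collect every file group scanned by `plan`, including groups held by the
/// hypothetical user defined node
fn all_file_groups(plan: Arc<dyn ExecutionPlan>) -> Vec<Vec<Vec<PartitionedFile>>> {
    PartitionedFileFinder::new()
        // built-in sources (ParquetExec, CsvExec, AvroExec, NdJsonExec) are
        // recognized by the finder itself; the closure only sees other nodes
        .with_finder(|node| {
            node.as_any()
                .downcast_ref::<MyCustomScanExec>()
                .map(|exec| exec.file_groups().to_vec())
        })
        .find(plan)
}
```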
   
   @tustvold Right, I also noticed that it's not really used. It was apparently
introduced in #5572 to address #5566:
   
   > Currently the TreeNodeRewriter is as a visitor to transform a node to 
another. However, sometimes we don't need to do the transformation and what we 
want is only to collect some info from the node. To achieve this, it's better 
to introduce another visitor for collecting info and keep the node unchanged.
   
   I'm not familiar enough with datafusion to determine whether or not that 
motivation still applies.
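
   That said, collecting info without transforming the plan already seems
possible with the closure-based `TreeNode::apply`, which is what `find` above
relies on. A minimal sketch (the `count_plan_nodes` helper is purely
illustrative):

```rust
use std::sync::Arc;

use datafusion::common::tree_node::{TreeNode, VisitRecursion};
use datafusion::common::Result;
use datafusion::physical_plan::ExecutionPlan;

/// Count the nodes of a physical plan by visiting it with a closure,
/// without rewriting anything (the same pattern `find` uses above)
fn count_plan_nodes(plan: &Arc<dyn ExecutionPlan>) -> Result<usize> {
    let mut count = 0;
    plan.apply(&mut |_node| {
        count += 1;
        Ok(VisitRecursion::Continue)
    })?;
    Ok(count)
}
```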


