pjmore commented on a change in pull request #2000:
URL: https://github.com/apache/arrow-datafusion/pull/2000#discussion_r825497085
##########
File path: datafusion/src/physical_plan/file_format/parquet.rs
##########
@@ -446,6 +473,78 @@ fn build_row_group_predicate(
}
}
+#[allow(clippy::too_many_arguments)]
+fn read_partition_no_file_columns(
+ object_store: &dyn ObjectStore,
+ partition_index: usize,
+ partition: &[PartitionedFile],
+ metrics: ExecutionPlanMetricsSet,
+ predicate_builder: &Option<PruningPredicate>,
+ batch_size: usize,
+ response_tx: Sender<ArrowResult<RecordBatch>>,
+ limit: Option<usize>,
+ mut partition_column_projector: PartitionColumnProjector,
+) -> Result<()> {
+ let mut remaining_rows = limit.unwrap_or(usize::MAX);
+ for partitioned_file in partition {
+ let mut file_row_count = 0;
+ let file_metrics = ParquetFileMetrics::new(
+ partition_index,
+ &*partitioned_file.file_meta.path(),
+ &metrics,
+ );
+ let object_reader =
+ object_store.file_reader(partitioned_file.file_meta.sized_file.clone())?;
+ let mut file_reader =
+ SerializedFileReader::new(ChunkObjectReader(object_reader))?;
+ if let Some(predicate_builder) = predicate_builder {
Review comment:
Whoops, that's right. I'll remove the filter and see if there are file-level
row counts I can use instead.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]