corwinjoy commented on code in PR #16738:
URL: https://github.com/apache/datafusion/pull/16738#discussion_r2201805504
##########
datafusion/datasource-parquet/src/file_format.rs:
##########
@@ -1723,28 +1708,47 @@ async fn output_single_parquet_file_parallelized(
let (serialize_tx, serialize_rx) =
mpsc::channel::<SpawnedTask<RBStreamSerializeResult>>(max_rowgroups);
+ let parquet_schema = ArrowSchemaConverter::new()
+ .with_coerce_types(parquet_props.coerce_types())
+ .convert(&output_schema)?;
+ let merged_buff = SharedBuffer::new(INITIAL_BUFFER_BYTES);
+ let parquet_writer = SerializedFileWriter::new(
+ merged_buff.clone(),
+ parquet_schema.root_schema_ptr(),
+ parquet_props.clone().into(),
+ )?;
+ let arrow_row_group_writer_factory = ArrowRowGroupWriterFactory::new(
+ &parquet_writer,
+ parquet_schema,
+ Arc::clone(&output_schema),
+ parquet_props.clone().into(),
+ );
+
let arc_props = Arc::new(parquet_props.clone());
let launch_serialization_task = spawn_parquet_parallel_serialization_task(
+ Arc::new(arrow_row_group_writer_factory),
data,
serialize_tx,
Arc::clone(&output_schema),
Arc::clone(&arc_props),
parallel_options,
Arc::clone(&pool),
);
+
+ launch_serialization_task
+ .join_unwind()
+ .await
+ .map_err(|e| DataFusionError::ExecutionJoin(Box::new(e)))??;
+
let file_metadata = concatenate_parallel_row_groups(
+ parquet_writer,
+ merged_buff,
serialize_rx,
- Arc::clone(&output_schema),
- Arc::clone(&arc_props),
object_store_writer,
pool,
)
.await?;
- launch_serialization_task
- .join_unwind()
- .await
- .map_err(|e| DataFusionError::ExecutionJoin(Box::new(e)))??;
Ok(file_metadata)
}
Review Comment:
Copilot comments:
- Row Group Indexing: The TODO comment indicates that the row group writer is
currently created with a hard-coded row group index of 0. If multiple row
groups are written in parallel, that could produce incorrect output or data
corruption; consider addressing it before merging (a hedged sketch of
per-task indexing follows this list).
- Encryption Parallelism: Removing the explicit check that disabled
parallelism when encryption is enabled now relies on arrow-rs handling it
internally. Double-check upstream that there is a clear error or fallback if
parallel writes are not supported with encryption (the second sketch below
shows the kind of guard that was dropped).
- Testing: These changes affect core serialization logic. Ensure thorough
integration and performance tests, especially for edge cases (encryption, large
datasets, multiple row groups).
- Documentation: Consider updating related docs or comments, as the code
flow and APIs used have changed significantly.
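
A hedged sketch of the per-task indexing pattern the first bullet asks about.
`RowGroupWriterFactory` and its `create` method are stand-ins, not DataFusion
or parquet-rs APIs; the point is only that each spawned serialization task
should receive its own monotonically increasing row-group index instead of a
hard-coded 0.

```rust
use std::sync::Arc;

// Stand-in for the real row-group writer factory. Assume the per-row-group
// constructor needs the row group's ordinal in the file (e.g. encrypted
// column chunks are tied to their row group position).
struct RowGroupWriterFactory;

impl RowGroupWriterFactory {
    fn create(&self, row_group_index: usize) {
        println!("serializing row group {row_group_index}");
    }
}

#[tokio::main]
async fn main() {
    let factory = Arc::new(RowGroupWriterFactory);
    let mut tasks = Vec::new();

    // Enumerate the spawned tasks so each one gets a unique, increasing
    // row-group index rather than the hard-coded 0 from the TODO.
    for row_group_index in 0..4usize {
        let factory = Arc::clone(&factory);
        tasks.push(tokio::spawn(async move {
            factory.create(row_group_index);
        }));
    }

    for task in tasks {
        task.await.unwrap();
    }
}
```

Whatever indexing scheme is used, the finished row groups still have to be
appended to the file in that same order when they are concatenated.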
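
And a minimal sketch of the kind of guard the second bullet says was removed:
fall back to a single serialization task when encryption is enabled, until
parallel encrypted writes are confirmed upstream. `choose_parallelism`,
`encryption_enabled`, and the constant are hypothetical names; how encryption
is actually detected from the writer properties depends on the parquet crate
version in use.

```rust
// Assumption: flip this once upstream confirms parallel encrypted writes.
const PARALLEL_ENCRYPTED_WRITES_SUPPORTED: bool = false;

/// Pick the number of parallel serialization tasks, falling back to one when
/// encryption is enabled and parallel encrypted writes are not supported.
fn choose_parallelism(encryption_enabled: bool, requested_parallelism: usize) -> usize {
    if encryption_enabled && !PARALLEL_ENCRYPTED_WRITES_SUPPORTED {
        1
    } else {
        requested_parallelism
    }
}

fn main() {
    assert_eq!(choose_parallelism(true, 8), 1);
    assert_eq!(choose_parallelism(false, 8), 8);
}
```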