wiedld commented on code in PR #11444:
URL: https://github.com/apache/datafusion/pull/11444#discussion_r1681449232


##########
datafusion/common/src/file_options/parquet_writer.rs:
##########
@@ -52,92 +53,43 @@ impl ParquetWriterOptions {
 impl TryFrom<&TableParquetOptions> for ParquetWriterOptions {
     type Error = DataFusionError;
 
-    fn try_from(parquet_options: &TableParquetOptions) -> Result<Self> {
-        let ParquetOptions {
-            data_pagesize_limit,
-            write_batch_size,
-            writer_version,
-            dictionary_page_size_limit,
-            max_row_group_size,
-            created_by,
-            column_index_truncate_length,
-            data_page_row_count_limit,
-            bloom_filter_on_write,
-            encoding,
-            dictionary_enabled,
-            compression,
-            statistics_enabled,
-            max_statistics_size,
-            bloom_filter_fpp,
-            bloom_filter_ndv,
-            //  below is not part of ParquetWriterOptions
-            enable_page_index: _,
-            pruning: _,
-            skip_metadata: _,
-            metadata_size_hint: _,
-            pushdown_filters: _,
-            reorder_filters: _,
-            allow_single_file_parallelism: _,
-            maximum_parallel_row_group_writers: _,
-            maximum_buffered_record_batches_per_stream: _,
-            bloom_filter_on_read: _,
-        } = &parquet_options.global;
-
-        let key_value_metadata = if !parquet_options.key_value_metadata.is_empty() {
-            Some(
-                parquet_options
-                    .key_value_metadata
-                    .clone()
-                    .drain()
-                    .map(|(key, value)| KeyValue { key, value })
-                    .collect::<Vec<_>>(),
-            )
-        } else {
-            None
-        };
-
-        let mut builder = WriterProperties::builder()
-            .set_data_page_size_limit(*data_pagesize_limit)
-            .set_write_batch_size(*write_batch_size)
-            .set_writer_version(parse_version_string(writer_version.as_str())?)
-            .set_dictionary_page_size_limit(*dictionary_page_size_limit)
-            .set_max_row_group_size(*max_row_group_size)
-            .set_created_by(created_by.clone())
-            .set_column_index_truncate_length(*column_index_truncate_length)
-            .set_data_page_row_count_limit(*data_page_row_count_limit)
-            .set_bloom_filter_enabled(*bloom_filter_on_write)
-            .set_key_value_metadata(key_value_metadata);
-
-        if let Some(encoding) = &encoding {
-            builder = builder.set_encoding(parse_encoding_string(encoding)?);
-        }
-
-        if let Some(enabled) = dictionary_enabled {
-            builder = builder.set_dictionary_enabled(*enabled);
-        }
-
-        if let Some(compression) = &compression {
-            builder = builder.set_compression(parse_compression_string(compression)?);
-        }
-
-        if let Some(statistics) = &statistics_enabled {
-            builder =
-                builder.set_statistics_enabled(parse_statistics_string(statistics)?);
-        }
-
-        if let Some(size) = max_statistics_size {
-            builder = builder.set_max_statistics_size(*size);
-        }
+    fn try_from(parquet_table_options: &TableParquetOptions) -> Result<Self> {
+        // ParquetWriterOptions will have defaults for the remaining fields (e.g. sorting_columns)
+        Ok(ParquetWriterOptions {
+            writer_options: WriterPropertiesBuilder::try_from(parquet_table_options)?
+                .build(),
+        })
+    }
+}
 
-        if let Some(fpp) = bloom_filter_fpp {
-            builder = builder.set_bloom_filter_fpp(*fpp);
-        }
+impl TryFrom<&TableParquetOptions> for WriterPropertiesBuilder {

Review Comment:
   We chatted. The session config (`TableParquetOptions`) is re-used repeatedly within an execution, whereas the `WriterPropertiesBuilder` is derived anew for each use of an arrow writer. That means we either pass the session config by reference or clone it each time.
   
   We decided it's OK as is. 👍🏼
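   
   For reference, a minimal sketch of the per-writer conversion as we discussed it. It assumes the `TryFrom<&TableParquetOptions> for WriterPropertiesBuilder` impl added in this PR; the helper name and variable names are illustrative only:
   
   ```rust
   use datafusion_common::config::TableParquetOptions;
   use datafusion_common::Result;
   use parquet::file::properties::{WriterProperties, WriterPropertiesBuilder};
   
   /// Hypothetical helper: derive fresh WriterProperties for one arrow writer
   /// from the session-level config, which is borrowed rather than cloned.
   fn writer_props_for_one_writer(
       session_opts: &TableParquetOptions,
   ) -> Result<WriterProperties> {
       // The TryFrom impl in this PR converts the borrowed session config into a
       // WriterPropertiesBuilder; only the fields it needs are copied out.
       Ok(WriterPropertiesBuilder::try_from(session_opts)?.build())
   }
   ```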



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

