rluvaton commented on code in PR #9700:
URL: https://github.com/apache/arrow-rs/pull/9700#discussion_r3116960936
##########
parquet/src/arrow/arrow_writer/mod.rs:
##########
@@ -4813,18 +4909,59 @@ mod tests {
let col0_meta = metadata.row_group(0).column(0);
let col1_meta = metadata.row_group(0).column(1);
- let get_dict_page_size = move |meta: &ColumnChunkMetaData| {
- let mut reader =
- SerializedPageReader::new(Arc::new(data.clone()), meta, 0,
None).unwrap();
- let page = reader.get_next_page().unwrap().unwrap();
- match page {
- Page::DictionaryPage { buf, .. } => buf.len(),
- _ => panic!("expected DictionaryPage"),
- }
- };
+ assert_eq!(get_dict_page_size(col0_meta, data.clone()), 1024 * 1024);
+ assert_eq!(get_dict_page_size(col1_meta, data.clone()), 1024 * 1024 *
4);
+ }
+
+ #[test]
+ fn test_dict_page_size_decided_by_compression_fallback() {
+ // Generate values that are well dispersed across a range
approximating (0..256 * 1024)
+ let array = Arc::new(Int32Array::from_iter(
+ (0i32..1024 * 1024).map(|x| x.wrapping_mul(163019) % 262139),
+ ));
+ let schema = Arc::new(Schema::new(vec![Field::new(
+ "col0",
+ arrow_schema::DataType::Int32,
+ false,
+ )]));
+ let batch = arrow_array::RecordBatch::try_new(schema.clone(),
vec![array]).unwrap();
+
+ let props = WriterProperties::builder()
+ .set_dictionary_page_size_limit(1024 * 1024)
+ .build();
+ let mut writer = ArrowWriter::try_new(Vec::new(), schema.clone(),
Some(props)).unwrap();
+ writer.write(&batch).unwrap();
+ let data = Bytes::from(writer.into_inner().unwrap());
+
+ println!("file length, dictionary: {}", data.len());
Review Comment:
Avoid the `println!` calls in tests. If the output would help with debugging, only surface it on failure — e.g. include the value in the assertion message (`assert_eq!(..., "file length, dictionary: {}", data.len())`) so it is printed only when the test fails.
##########
parquet/src/arrow/arrow_writer/mod.rs:
##########
@@ -4813,18 +4909,59 @@ mod tests {
let col0_meta = metadata.row_group(0).column(0);
let col1_meta = metadata.row_group(0).column(1);
- let get_dict_page_size = move |meta: &ColumnChunkMetaData| {
- let mut reader =
- SerializedPageReader::new(Arc::new(data.clone()), meta, 0,
None).unwrap();
- let page = reader.get_next_page().unwrap().unwrap();
- match page {
- Page::DictionaryPage { buf, .. } => buf.len(),
- _ => panic!("expected DictionaryPage"),
- }
- };
+ assert_eq!(get_dict_page_size(col0_meta, data.clone()), 1024 * 1024);
+ assert_eq!(get_dict_page_size(col1_meta, data.clone()), 1024 * 1024 *
4);
+ }
+
+ #[test]
+ fn test_dict_page_size_decided_by_compression_fallback() {
+ // Generate values that are well dispersed across a range
approximating (0..256 * 1024)
+ let array = Arc::new(Int32Array::from_iter(
+ (0i32..1024 * 1024).map(|x| x.wrapping_mul(163019) % 262139),
+ ));
+ let schema = Arc::new(Schema::new(vec![Field::new(
+ "col0",
+ arrow_schema::DataType::Int32,
+ false,
+ )]));
+ let batch = arrow_array::RecordBatch::try_new(schema.clone(),
vec![array]).unwrap();
+
+ let props = WriterProperties::builder()
+ .set_dictionary_page_size_limit(1024 * 1024)
+ .build();
+ let mut writer = ArrowWriter::try_new(Vec::new(), schema.clone(),
Some(props)).unwrap();
+ writer.write(&batch).unwrap();
+ let data = Bytes::from(writer.into_inner().unwrap());
+
+ println!("file length, dictionary: {}", data.len());
+
+ let mut metadata = ParquetMetaDataReader::new();
+ metadata.try_parse(&data).unwrap();
+ let metadata = metadata.finish().unwrap();
+ let full_dict_meta = metadata.row_group(0).column(0);
+ assert_eq!(get_dict_page_size(full_dict_meta, data.clone()),
1_048_576);
+
+ let props = WriterProperties::builder()
+ .set_dictionary_page_size_limit(1024 * 1024)
+ .set_column_dictionary_fallback(
+ ColumnPath::from("col0"),
+ DictionaryFallback::OnUnfavorableAfter(32_768),
+ )
+ .build();
+ let mut writer = ArrowWriter::try_new(Vec::new(), schema.clone(),
Some(props)).unwrap();
+ writer.write(&batch).unwrap();
+ let data = Bytes::from(writer.into_inner().unwrap());
+
+ println!("file length, fallback: {}", data.len());
Review Comment:
Avoid the `println!` calls in tests. If the output would help with debugging, only surface it on failure — e.g. include the value in the assertion message so it is printed only when the test fails.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]