yordan-pavlov commented on a change in pull request #1082:
URL: https://github.com/apache/arrow-rs/pull/1082#discussion_r776110779



##########
File path: parquet/src/arrow/array_reader/byte_array.rs
##########
@@ -0,0 +1,639 @@
+use crate::arrow::array_reader::{read_records, ArrayReader};
+use crate::arrow::record_reader::buffer::{RecordBuffer, TypedBuffer, ValueBuffer};
+use crate::arrow::record_reader::GenericRecordReader;
+use crate::arrow::schema::parquet_to_arrow_field;
+use crate::basic::Encoding;
+use crate::column::page::PageIterator;
+use crate::column::reader::decoder::{ColumnValueDecoder, ValuesWriter};
+use crate::data_type::Int32Type;
+use crate::decoding::{Decoder, DeltaBitPackDecoder};
+use crate::encodings::rle::RleDecoder;
+use crate::errors::{ParquetError, Result};
+use crate::memory::ByteBufferPtr;
+use crate::schema::types::ColumnDescPtr;
+use arrow::array::{
+    ArrayData, ArrayDataBuilder, ArrayRef, BinaryArray, LargeBinaryArray,
+    LargeStringArray, OffsetSizeTrait, StringArray,
+};
+use arrow::buffer::Buffer;
+use arrow::datatypes::DataType as ArrowType;
+use std::any::Any;
+use std::ops::Range;
+use std::sync::Arc;
+
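+/// Record readers specialized for each supported output array type, pairing
+/// the offset width (i32 or i64) with the corresponding decoder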
+enum Reader {
+    Binary(GenericRecordReader<OffsetBuffer<i32>, ByteArrayDecoder<i32>>),
+    LargeBinary(GenericRecordReader<OffsetBuffer<i64>, ByteArrayDecoder<i64>>),
+    Utf8(GenericRecordReader<OffsetBuffer<i32>, ByteArrayDecoder<i32>>),
+    LargeUtf8(GenericRecordReader<OffsetBuffer<i64>, ByteArrayDecoder<i64>>),
+}
+
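+/// Assembles `ArrayData` from the offset and value buffers accumulated by the
+/// record reader, attaching the null bitmap if present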
+fn consume_array_data<I: OffsetSizeTrait>(
+    data_type: ArrowType,
+    reader: &mut GenericRecordReader<OffsetBuffer<I>, ByteArrayDecoder<I>>,
+) -> Result<ArrayData> {
+    let buffer = reader.consume_record_data()?;
+    let mut array_data_builder = ArrayDataBuilder::new(data_type)
+        .len(buffer.len())
+        .add_buffer(buffer.offsets.into())
+        .add_buffer(buffer.values.into());
+
+    if let Some(buffer) = reader.consume_bitmap_buffer()? {
+        array_data_builder = array_data_builder.null_bit_buffer(buffer);
+    }
+    Ok(unsafe { array_data_builder.build_unchecked() })
+}
+
+pub struct ByteArrayReader {
+    data_type: ArrowType,
+    pages: Box<dyn PageIterator>,
+    def_levels_buffer: Option<Buffer>,
+    rep_levels_buffer: Option<Buffer>,
+    column_desc: ColumnDescPtr,
+    record_reader: Reader,
+}
+
+impl ByteArrayReader {
+    /// Construct a byte array reader.
+    pub fn new(
+        pages: Box<dyn PageIterator>,
+        column_desc: ColumnDescPtr,
+        arrow_type: Option<ArrowType>,
+    ) -> Result<Self> {
+        Self::new_with_options(pages, column_desc, arrow_type, false)
+    }
+
+    /// Construct a byte array reader with the ability to only compute the
+    /// null mask and not buffer level data
+    pub fn new_with_options(
+        pages: Box<dyn PageIterator>,
+        column_desc: ColumnDescPtr,
+        arrow_type: Option<ArrowType>,
+        null_mask_only: bool,
+    ) -> Result<Self> {
+        // Check if Arrow type is specified, else create it from Parquet type
+        let data_type = match arrow_type {
+            Some(t) => t,
+            None => parquet_to_arrow_field(column_desc.as_ref())?
+                .data_type()
+                .clone(),
+        };
+
+        let record_reader = match data_type {
+            ArrowType::Binary => Reader::Binary(GenericRecordReader::new_with_options(
+                column_desc.clone(),
+                null_mask_only,
+            )),
+            ArrowType::LargeBinary => {
+                Reader::LargeBinary(GenericRecordReader::new_with_options(
+                    column_desc.clone(),
+                    null_mask_only,
+                ))
+            }
+            ArrowType::Utf8 => Reader::Utf8(GenericRecordReader::new_with_options(
+                column_desc.clone(),
+                null_mask_only,
+            )),
+            ArrowType::LargeUtf8 => {
+                Reader::LargeUtf8(GenericRecordReader::new_with_options(
+                    column_desc.clone(),
+                    null_mask_only,
+                ))
+            }
+            _ => {
+                return Err(general_err!(
+                    "invalid data type for ByteArrayReader - {}",
+                    data_type
+                ))
+            }
+        };
+
+        Ok(Self {
+            data_type,
+            pages,
+            def_levels_buffer: None,
+            rep_levels_buffer: None,
+            column_desc,
+            record_reader,
+        })
+    }
+}
+
+impl ArrayReader for ByteArrayReader {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn get_data_type(&self) -> &ArrowType {
+        &self.data_type
+    }
+
+    fn next_batch(&mut self, batch_size: usize) -> Result<ArrayRef> {
+        let data = match &mut self.record_reader {
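+            // The arm bodies are identical, but each binds a record reader
+            // with a different concrete offset type (i32 vs i64)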
+            Reader::Binary(r) | Reader::Utf8(r) => {
+                read_records(r, self.pages.as_mut(), batch_size)?;
+                let data = consume_array_data(self.data_type.clone(), r)?;
+                self.def_levels_buffer = r.consume_def_levels()?;
+                self.rep_levels_buffer = r.consume_rep_levels()?;
+                r.reset();
+                data
+            }
+            Reader::LargeBinary(r) | Reader::LargeUtf8(r) => {
+                read_records(r, self.pages.as_mut(), batch_size)?;
+                let data = consume_array_data(self.data_type.clone(), r)?;
+                self.def_levels_buffer = r.consume_def_levels()?;
+                self.rep_levels_buffer = r.consume_rep_levels()?;
+                r.reset();
+                data
+            }
+        };
+
+        Ok(match &self.record_reader {
+            Reader::Binary(_) => Arc::new(BinaryArray::from(data)),

Review comment:
       I wonder if the type of array to create should be a generic parameter as well?
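       A minimal sketch of what that could look like, assuming a hypothetical `ByteArrayType` trait (none of these names are in this PR) that ties the offset width to the concrete array constructor:

```rust
use std::sync::Arc;
use arrow::array::{
    ArrayData, ArrayRef, LargeStringArray, OffsetSizeTrait, StringArray,
};

/// Hypothetical: maps an output array type to its offset width and a
/// constructor for the concrete Arrow array.
trait ByteArrayType {
    type Offset: OffsetSizeTrait;
    fn make_array(data: ArrayData) -> ArrayRef;
}

struct Utf8Type;
impl ByteArrayType for Utf8Type {
    type Offset = i32;
    fn make_array(data: ArrayData) -> ArrayRef {
        Arc::new(StringArray::from(data))
    }
}

struct LargeUtf8Type;
impl ByteArrayType for LargeUtf8Type {
    type Offset = i64;
    fn make_array(data: ArrayData) -> ArrayRef {
        Arc::new(LargeStringArray::from(data))
    }
}
```

       `Binary` and `LargeBinary` would follow the same pattern. The reader could then be a `ByteArrayReader<T: ByteArrayType>` holding a single `GenericRecordReader<OffsetBuffer<T::Offset>, ByteArrayDecoder<T::Offset>>`, and `next_batch` would end with `T::make_array(data)` instead of matching on the `Reader` enum.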



