GregBowyer commented on a change in pull request #8698:
URL: https://github.com/apache/arrow/pull/8698#discussion_r534655181



##########
File path: rust/parquet/src/data_type.rs
##########
@@ -583,32 +740,198 @@ pub(crate) mod private {
     impl_from_raw!(f64, self => { Err(general_err!("Type cannot be converted 
to i64")) });
 
     impl ParquetValueType for super::Int96 {
-        fn encoded(&self) -> EncodedValue<'_> {
-            EncodedValue::Bytes { data: self.as_bytes() }
+        #[inline]
+        fn encode<W: std::io::Write>(values: &[Self], writer: &mut W, _: &mut 
BitWriter) -> Result<()> {
+            for value in values {
+                let raw = unsafe {
+                    std::slice::from_raw_parts(value.data() as *const [u32] as 
*const u8, 12)
+                };
+                writer.write_all(raw)?;
+            }
+            Ok(())
         }
 
+        #[inline]
+        fn set_data(decoder: &mut PlainDecoderDetails, data: ByteBufferPtr, 
num_values: usize) {
+            decoder.data.replace(data);
+            decoder.start = 0;
+            decoder.num_values = num_values;
+        }
+
+        #[inline]
+        fn decode(buffer: &mut [Self], decoder: &mut PlainDecoderDetails) -> 
Result<usize> {
+            // TODO - Remove the duplication between this and the general 
slice method
+            let data = decoder.data.as_ref().expect("set_data should have been 
called");
+            let num_values = std::cmp::min(buffer.len(), decoder.num_values);
+            let bytes_left = data.len() - decoder.start;
+            let bytes_to_decode = 12 * num_values;
+
+            if bytes_left < bytes_to_decode {
+                return Err(eof_err!("Not enough bytes to decode"));
+            }
+
+            let data_range = data.range(decoder.start, bytes_to_decode);
+            let bytes: &[u8] = data_range.data();
+            decoder.start += bytes_to_decode;
+
+            let mut pos = 0; // position in byte array
+            for i in 0..num_values {
+                let elem0 = byteorder::LittleEndian::read_u32(&bytes[pos..pos 
+ 4]);
+                let elem1 = byteorder::LittleEndian::read_u32(&bytes[pos + 
4..pos + 8]);
+                let elem2 = byteorder::LittleEndian::read_u32(&bytes[pos + 
8..pos + 12]);
+
+                buffer[i]
+                    .as_mut_any()
+                    .downcast_mut::<Self>()
+                    .unwrap()
+                    .set_data(elem0, elem1, elem2);
+
+                pos += 12;
+            }
+            decoder.num_values -= num_values;
+
+            Ok(num_values)
+        }
+
+        #[inline]
         fn as_any(&self) -> &dyn std::any::Any {
             self
         }
 
+        #[inline]
         fn as_mut_any(&mut self) -> &mut dyn std::any::Any {
             self
         }
     }
 
+    // TODO - Why does macro importing fail?
+    /// Reads `$size` of bytes from `$src`, and reinterprets them as type 
`$ty`, in
+    /// little-endian order. `$ty` must implement the `Default` trait. 
Otherwise this won't
+    /// compile.
+    /// This is copied and modified from byteorder crate.
+    macro_rules! read_num_bytes {
+        ($ty:ty, $size:expr, $src:expr) => {{
+            assert!($size <= $src.len());
+            let mut buffer = <$ty as 
$crate::util::bit_util::FromBytes>::Buffer::default();
+            buffer.as_mut()[..$size].copy_from_slice(&$src[..$size]);
+            <$ty>::from_ne_bytes(buffer)
+        }};
+    }
+
     impl ParquetValueType for super::ByteArray {
-        fn encoded(&self) -> EncodedValue<'_> {
-            EncodedValue::Bytes { data: self.data() }
+        #[inline]
+        fn encode<W: std::io::Write>(values: &[Self], writer: &mut W, _: &mut 
BitWriter) -> Result<()> {
+            for value in values {
+                let len: u32 = value.len().try_into().unwrap();
+                writer.write_all(&len.to_ne_bytes())?;
+                let raw = value.data();
+                writer.write_all(raw)?;
+            }
+            Ok(())
+        }
+
+        #[inline]
+        fn set_data(decoder: &mut PlainDecoderDetails, data: ByteBufferPtr, 
num_values: usize) {
+            decoder.data.replace(data);
+            decoder.start = 0;
+            decoder.num_values = num_values;
+        }
+
+        #[inline]
+        fn decode(buffer: &mut [Self], decoder: &mut PlainDecoderDetails) -> 
Result<usize> {
+            let data = decoder.data.as_mut().expect("set_data should have been 
called");
+            let num_values = std::cmp::min(buffer.len(), decoder.num_values);
+            for i in 0..num_values {
+                let len: usize = read_num_bytes!(u32, 4, 
data.start_from(decoder.start).as_ref()) as usize;
+                decoder.start += std::mem::size_of::<u32>();
+
+                if data.len() < decoder.start + len {
+                    return Err(eof_err!("Not enough bytes to decode"));
+                }
+
+                let val: &mut Self = buffer[i]
+                    .as_mut_any()
+                    .downcast_mut()
+                    .unwrap();
+
+                val.set_data(data.range(decoder.start, len));
+                decoder.start += len;
+            }
+            decoder.num_values -= num_values;
+
+            Ok(num_values)
+        }
+
+        #[inline]
+        fn dict_encoding_size(&self) -> (usize, usize) {
+            (std::mem::size_of::<u32>(), self.len())
+        }
+
+        #[inline]
+        fn as_any(&self) -> &dyn std::any::Any {
+            self
         }
 
+        #[inline]
+        fn as_mut_any(&mut self) -> &mut dyn std::any::Any {
+            self
+        }
+    }
+
+    impl ParquetValueType for super::FixedLenByteArray {

Review comment:
      The lifted `FixedLenByteArray` is used here to restore (and actually 
improve) the performance of encoding and decoding.
   
    I think the logic is different enough to warrant some duplication.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to