tustvold commented on code in PR #2111:
URL: https://github.com/apache/arrow-rs/pull/2111#discussion_r925682705


##########
parquet/src/arrow/record_reader/definition_levels.rs:
##########
@@ -399,6 +442,55 @@ mod tests {
         assert_eq!(decoded.as_slice(), expected.as_slice());
     }
 
+    #[test]
+    fn test_packed_decoder_skip() {
+        let mut rng = thread_rng();
+        let len: usize = rng.gen_range(512..1024) * 8;
+
+        let mut expected = BooleanBufferBuilder::new(len);
+        let mut encoder = RleEncoder::new(1, 1024 * 8);
+
+        for _ in 0..len {
+            let bool = rng.gen_bool(0.8);
+            assert!(encoder.put(bool as u64).unwrap());
+            expected.append(bool);
+        }
+        assert_eq!(expected.len(), len);
+
+        let encoded = encoder.consume().unwrap();
+        let mut decoder = PackedDecoder::new(Encoding::RLE, ByteBufferPtr::new(encoded));
+
+        let mut skip_value = 0;
+        let mut read_value = 0;
+        let mut read_data = vec![];
+
+        loop {
+            let remaining = len - read_value - skip_value;
+            if remaining == 0 {
+                break;
+            }
+            let to_read = (rng.gen_range(1..=remaining)) / 8 * 8;

Review Comment:
   Why do we need to read in multiples of 8?



##########
parquet/src/arrow/record_reader/definition_levels.rs:
##########
@@ -399,6 +442,55 @@ mod tests {
         assert_eq!(decoded.as_slice(), expected.as_slice());
     }
 
+    #[test]
+    fn test_packed_decoder_skip() {
+        let mut rng = thread_rng();
+        let len: usize = rng.gen_range(512..1024) * 8;
+
+        let mut expected = BooleanBufferBuilder::new(len);
+        let mut encoder = RleEncoder::new(1, 1024 * 8);
+
+        for _ in 0..len {
+            let bool = rng.gen_bool(0.8);
+            assert!(encoder.put(bool as u64).unwrap());
+            expected.append(bool);
+        }
+        assert_eq!(expected.len(), len);
+
+        let encoded = encoder.consume().unwrap();
+        let mut decoder = PackedDecoder::new(Encoding::RLE, ByteBufferPtr::new(encoded));
+
+        let mut skip_value = 0;
+        let mut read_value = 0;
+        let mut read_data = vec![];
+
+        loop {
+            let remaining = len - read_value - skip_value;
+            if remaining == 0 {
+                break;
+            }
+            let to_read = (rng.gen_range(1..=remaining)) / 8 * 8;
+            if rng.gen_bool(0.5) {
+                skip_value += decoder.skip(to_read).unwrap();
+            } else if to_read > 0 {
+                let mut decoded = BooleanBufferBuilder::new(to_read);
+                read_value += decoder.read(&mut decoded, to_read).unwrap();
+                read_data.push(decoded.as_slice().to_vec());
+            }
+        }
+
+        assert_eq!(read_value + skip_value, len);
+
+        let expected = expected.as_slice();
+        for data in read_data.iter().enumerate() {
+            assert!(find_subsequence(expected, data.1).is_some());

Review Comment:
   Perhaps rather than this method, which will yield false positives, we could 
keep the original full set of bools, and build up the expected result as we go?
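
A minimal, self-contained sketch of the bookkeeping this suggestion implies: plain `Vec<bool>` stands in for `PackedDecoder`/`BooleanBufferBuilder`, and the chunk sizes are arbitrary, so this is illustrative rather than the crate's actual test code.

```rust
fn main() {
    // The full set of bools the test would have encoded.
    let all: Vec<bool> = (0..64).map(|i| i % 3 != 0).collect();

    let mut offset = 0;                        // values consumed (read or skipped) so far
    let mut expected_read: Vec<bool> = vec![]; // what a correct `read` must return
    let mut actually_read: Vec<bool> = vec![]; // what the decoder under test returned

    // Alternate skip / read in arbitrary chunk sizes (5 + 11 + 7 + 41 = 64).
    for (skip, chunk) in [(true, 5usize), (false, 11), (true, 7), (false, 41)] {
        if skip {
            // In the real test: assert_eq!(decoder.skip(chunk).unwrap(), chunk);
            offset += chunk;
        } else {
            expected_read.extend_from_slice(&all[offset..offset + chunk]);
            // In the real test this would come from decoder.read(..); here we
            // copy the source values to keep the sketch self-contained.
            actually_read.extend_from_slice(&all[offset..offset + chunk]);
            offset += chunk;
        }
    }

    // Exact positional comparison: no false positives, unlike a subsequence search.
    assert_eq!(actually_read, expected_read);
}
```

The key point is that `offset` advances for both reads and skips, so the final comparison is positional rather than a `find_subsequence` lookup.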



##########
parquet/src/arrow/record_reader/definition_levels.rs:
##########
@@ -226,10 +226,27 @@ impl ColumnLevelDecoder for DefinitionLevelBufferDecoder {
 impl DefinitionLevelDecoder for DefinitionLevelBufferDecoder {
     fn skip_def_levels(
         &mut self,
-        _num_levels: usize,
-        _max_def_level: i16,
+        num_levels: usize,
+        max_def_level: i16,
     ) -> Result<(usize, usize)> {
-        Err(nyi_err!("https://github.com/apache/arrow-rs/issues/1792"))
+        // For now only support max_def_level == 1
+        if max_def_level == 1 {

Review Comment:
   This isn't technically correct: if you have a nullable StructArray 
containing a non-nullable child, the child will have a max definition level of 
1, but it will be using `ColumnLevelDecoderImpl`, not `PackedDecoder`.
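
For illustration, a rough sketch of the schema shape being described, built with arrow's `Field` and `DataType` (the field names here are made up): the leaf's single definition level is contributed entirely by the optional parent, not by the leaf itself.

```rust
use arrow::datatypes::{DataType, Field};

fn main() {
    // Non-nullable leaf: adds no definition level of its own.
    let child = Field::new("child", DataType::Int32, false);

    // Nullable struct parent: the child's max definition level of 1 comes
    // solely from here, so the leaf is decoded with ColumnLevelDecoderImpl
    // rather than PackedDecoder.
    let parent = Field::new("parent", DataType::Struct(vec![child].into()), true);

    println!("{:?}", parent);
}
```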



##########
parquet/src/arrow/record_reader/definition_levels.rs:
##########
@@ -226,10 +226,27 @@ impl ColumnLevelDecoder for DefinitionLevelBufferDecoder {
 impl DefinitionLevelDecoder for DefinitionLevelBufferDecoder {
     fn skip_def_levels(
         &mut self,
-        _num_levels: usize,
-        _max_def_level: i16,
+        num_levels: usize,
+        max_def_level: i16,
     ) -> Result<(usize, usize)> {
-        Err(nyi_err!("https://github.com/apache/arrow-rs/issues/1792"))
+        // For now only support max_def_level == 1
+        if max_def_level == 1 {
+            let decoder = match self.data.take() {
+                Some(data) => self
+                    .packed_decoder
+                    .insert(PackedDecoder::new(self.encoding, data)),

Review Comment:
   Again this isn't technically correct, as `PackedDecoder` can only be used if 
the leaf is the nullable column (and not a parent).



##########
parquet/src/column/reader/decoder.rs:
##########
@@ -307,10 +307,28 @@ impl ColumnLevelDecoder for ColumnLevelDecoderImpl {
 impl DefinitionLevelDecoder for ColumnLevelDecoderImpl {
     fn skip_def_levels(
         &mut self,
-        _num_levels: usize,
-        _max_def_level: i16,
+        num_levels: usize,
+        max_def_level: i16,
     ) -> Result<(usize, usize)> {
-        Err(nyi_err!("https://github.com/apache/arrow-rs/issues/1792"))
+        // For now only support max_def_level == 1
+        if max_def_level == 1 {

Review Comment:
   Again this is incorrect; it should just be a case of returning `nyi_err` if 
the decoder is not a `PackedDecoder`.
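
A self-contained sketch of the dispatch this suggests; the enum and both variants are stand-ins invented for illustration, not the crate's real types, and the real change would hinge on whatever internal state identifies the packed path.

```rust
// Stand-in types: skipping is implemented only for the packed/bitmask decoder,
// and everything else keeps returning a "not yet implemented" error, mirroring
// nyi_err!("https://github.com/apache/arrow-rs/issues/1792").
enum LevelDecoderKind {
    Packed { remaining: usize }, // stand-in for PackedDecoder
    General,                     // stand-in for the non-packed path
}

fn skip_def_levels(decoder: &mut LevelDecoderKind, num_levels: usize) -> Result<usize, String> {
    match decoder {
        LevelDecoderKind::Packed { remaining } => {
            let skipped = num_levels.min(*remaining);
            *remaining -= skipped;
            Ok(skipped)
        }
        LevelDecoderKind::General => Err(
            "skipping non-packed definition levels is not yet implemented".to_string(),
        ),
    }
}

fn main() {
    let mut packed = LevelDecoderKind::Packed { remaining: 100 };
    assert_eq!(skip_def_levels(&mut packed, 30), Ok(30));

    let mut general = LevelDecoderKind::General;
    assert!(skip_def_levels(&mut general, 30).is_err());
}
```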



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
