mkarbo commented on code in PR #7535:
URL: https://github.com/apache/arrow-rs/pull/7535#discussion_r2104562325


##########
parquet-variant/src/variant.rs:
##########
@@ -18,158 +109,223 @@ impl<'m> VariantMetadata<'m> {
         self.bytes
     }
 
+    pub fn try_new(bytes: &'m [u8]) -> Result<Self, ArrowError> {
+        let header = VariantMetadataHeader::try_new(bytes)?;
+        // Offset 1, index 0 because first element after header is dictionary size
+        let dict_size = header.offset_size.unpack_usize(bytes, 1, 0)?;
+
+        // TODO: Refactor, add test for validation
+        let valid = (0..=dict_size)
+            .map(|i| header.offset_size.unpack_usize(bytes, 1, i + 1))
+            .scan(0, |prev, cur| {
+                let Ok(cur_offset) = cur else {
+                    return Some(false);
+                };
+                // Skip the first offset, which is always 0
+                if *prev == 0 {
+                    *prev = cur_offset;
+                    return Some(true);
+                }
+
+                let valid = cur_offset > *prev;
+                *prev = cur_offset;
+                Some(valid)
+            })
+            .all(|valid| valid);
+
+        if !valid {
+            return Err(ArrowError::InvalidArgumentError(
+                "Offsets are not monotonically increasing".to_string(),
+            ));
+        }
+        Ok(Self {
+            bytes,
+            header,
+            dict_size,
+        })
+    }
+
     /// Whether the dictionary keys are sorted and unique
     pub fn is_sorted(&self) -> bool {
-        todo!()
+        self.header.is_sorted
     }
 
-    /// Get the dict length
-    pub fn dict_len(&self) -> Result<usize, ArrowError> {
-        let end_location = self.offset_size()? as usize + 1;
-        if self.bytes.len() < end_location {
-            let err_str = format!(
-                "Invalid metadata bytes, must have at least length {} but has 
length {}",
-                &end_location,
-                self.bytes.len()
-            );
-            return Err(ArrowError::InvalidArgumentError(err_str));
-        }
-        let dict_len_bytes = &self.bytes[1..end_location];
-        let dict_len = usize::from_le_bytes(dict_len_bytes.try_into().map_err(|e| {
-            ArrowError::InvalidArgumentError(format!(
-                "Unable to convert dictionary_size bytes into usize: {}",
-                e,
-            ))
-        })?);
-        Ok(dict_len)
+    /// Get the dictionary size
+    pub fn dictionary_size(&self) -> usize {
+        self.dict_size
     }
-    pub fn version(&self) -> usize {
-        todo!()
+    pub fn version(&self) -> u8 {
+        self.header.version
     }
 
-    /// Get the offset by index
-    pub fn get_offset_by(&self, index: usize) -> Result<usize, ArrowError> {
-        todo!()
+    /// Get the offset by key-index
+    pub fn get_offset_by(&self, index: usize) -> Result<Range<usize>, ArrowError> {
+        // TODO: Should we memoize the offsets? There could be thousands of them (https://github.com/apache/arrow-rs/pull/7535#discussion_r2101351294)
+        if index >= self.dict_size {
+            return Err(ArrowError::InvalidArgumentError(format!(
+                "Index {} out of bounds for dictionary of length {}",
+                index, self.dict_size
+            )));
+        }
+
+        // Skipping the header byte (setting byte_offset = 1) and the dictionary_size (setting offset_index + 1)
+        // TODO: Validate size before looking up?
+        // TODO: Fix location / bytes here, the index is wrong.
+        let start = self
+            .header
+            .offset_size
+            .unpack_usize(self.bytes, 1, index + 1)?;
+        let end = self
+            .header
+            .offset_size
+            .unpack_usize(self.bytes, 1, index + 2)?;
+        Ok(start..end)
     }
 
-    /// Get the header byte, which has the following form
-    ///              7     6  5   4  3             0
-    ///             +-------+---+---+---------------+
-    /// header      |       |   |   |    version    |
-    ///             +-------+---+---+---------------+
-    ///                 ^         ^
-    ///                 |         +-- sorted_strings
-    ///                 +-- offset_size_minus_one
-    /// The version is a 4-bit value that must always contain the value 1.
-    /// - sorted_strings is a 1-bit value indicating whether dictionary strings are sorted and unique.
-    /// - offset_size_minus_one is a 2-bit value providing the number of bytes per dictionary size and offset field.
-    /// - The actual number of bytes, offset_size, is offset_size_minus_one + 1
-    pub fn header(&self) -> Result<u8, ArrowError> {
-        if self.bytes.is_empty() {
-            return Err(ArrowError::InvalidArgumentError(
-                "Can't get header from empty buffer".to_string(),
-            ));
+    /// Get the key-name by index
+    pub fn get_field_by_index(&self, index: usize) -> Result<&'m str, ArrowError> {
+        match self.get_offset_by(index) {
+            Ok(range) => self.get_field_by_offset(range),
+            Err(e) => Err(e),
         }
-        Ok(self.bytes[0])
     }
 
-    /// Get the offset_minus_one value from the header
-    pub fn offset_size_minus_one(&self) -> Result<u8, ArrowError> {
-        if self.bytes.is_empty() {
-            Err(ArrowError::InvalidArgumentError(
-                "Tried to get offset_size_minus_one from header, but 
self.bytes buffer is emtpy."
-                    .to_string(),
-            ))
-        } else {
-            Ok(self.bytes[0] & (0b11 << 6)) // Grab the last 2 bits
-        }
+    /// Gets the field using an offset (Range) - helper method to keep consistent API.
+    pub fn get_field_by_offset(&self, offset: Range<usize>) -> Result<&'m str, ArrowError> {
+        let dictionary_key_start_byte = 1 // header
+                    + self.header.offset_size as usize // dictionary_size field itself
+                    + (self.dict_size + 1) * (self.header.offset_size as usize); // all offset entries
+        let dictionary_keys_bytes =
+            slice_from_slice(self.bytes, dictionary_key_start_byte..self.bytes.len())?;
+        let dictionary_key_bytes =
+            slice_from_slice(dictionary_keys_bytes, offset.start..offset.end)?;
+        let result = str::from_utf8(dictionary_key_bytes).map_err(|_| invalid_utf8_err())?;
+        Ok(result)
     }
 
-    /// Get the offset_size
-    pub fn offset_size(&self) -> Result<u8, ArrowError> {
-        Ok(self.offset_size_minus_one()? + 1)
+    pub fn header(&self) -> VariantMetadataHeader {
+        self.header
     }
 
     /// Get the offsets as an iterator
-    // TODO: Do we want this kind of API?
-    // TODO: Test once API is agreed upon
-    pub fn offsets(&'m self) -> Result<impl Iterator<Item = (usize, usize)> + 'm, ArrowError> {
+    // TODO: Write tests
+    pub fn offsets(
+        &'m self,
+    ) -> Result<impl Iterator<Item = Result<Range<usize>, ArrowError>> + 'm, ArrowError> {
         struct OffsetIterators<'m> {
             buffer: &'m [u8],
+            header: &'m VariantMetadataHeader,
             dict_len: usize,
             seen: usize,
-            offset_size: usize,
         }
         impl<'m> Iterator for OffsetIterators<'m> {
-            type Item = (usize, usize); // (start, end) positions of the bytes
-
-            // TODO: Check bounds here or ensure they're correct
-
+            type Item = Result<Range<usize>, ArrowError>; // Range = (start, end) positions of the bytes
             fn next(&mut self) -> Option<Self::Item> {
-                // +1 to skip the first offset
                 if self.seen < self.dict_len {
-                    let start = usize::from_le_bytes(
-                        self.buffer[(self.seen ) * self.offset_size + 1 // +1 to skip header
-                            ..(self.seen ) * self.offset_size + 1]
-                            .try_into()
-                            .ok()?,
-                    );
+                    let start = self
+                        .header
+                        .offset_size
+                        // skip header via byte_offset=1 and self.seen + 1 because first is dictionary_size
+                        .unpack_usize(self.buffer, 1, self.seen + 1);

Review Comment:
   Regarding the memory error: quite possibly, yeah. Let me check the variants of ArrowError again before we make the PR ready to merge.
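   
   As a side note on the `TODO: Refactor` around the monotonicity check above, one option is to iterate over the already-unpacked offsets and track the previous value explicitly instead of the `scan`-based skip of the first entry. The sketch below is purely illustrative: it assumes the offsets come from `header.offset_size.unpack_usize(bytes, 1, i + 1)` as in the diff, and the helper name `validate_offsets` is hypothetical.
   
   ```rust
   use arrow_schema::ArrowError;
   
   /// Checks that dictionary offsets start at 0 and are strictly increasing,
   /// matching the `cur_offset > *prev` comparison used in the PR.
   fn validate_offsets(
       offsets: impl Iterator<Item = Result<usize, ArrowError>>,
   ) -> Result<(), ArrowError> {
       let mut prev: Option<usize> = None;
       for offset in offsets {
           let offset = offset?; // propagate unpacking errors directly
           match prev {
               // The first offset is always 0 per the metadata layout.
               None if offset != 0 => {
                   return Err(ArrowError::InvalidArgumentError(
                       "First offset is not zero".to_string(),
                   ));
               }
               // Every later offset must be strictly larger than the previous one.
               Some(p) if offset <= p => {
                   return Err(ArrowError::InvalidArgumentError(
                       "Offsets are not monotonically increasing".to_string(),
                   ));
               }
               _ => {}
           }
           prev = Some(offset);
       }
       Ok(())
   }
   ```
   
   With that shape, the out-of-bounds case would simply surface as whatever error `unpack_usize` returns, so the choice of ArrowError variant stays in one place.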


