mkarbo commented on code in PR #7535: URL: https://github.com/apache/arrow-rs/pull/7535#discussion_r2115375272
########## parquet-variant/src/variant.rs: ########## @@ -0,0 +1,714 @@
use crate::decoder::{
    self, get_basic_type, get_primitive_type, VariantBasicType, VariantPrimitiveType,
};
use crate::utils::{array_from_slice, first_byte_from_slice, slice_from_slice, string_from_slice};
use arrow_schema::ArrowError;
use std::{
    num::TryFromIntError,
    ops::{Index, Range},
};

/// Width, in bytes, of the offset/size fields used by Variant metadata.
///
/// The discriminant values (1–4) are the byte widths themselves, so
/// `self as usize` can be used directly as a stride when stepping
/// through an offset table.
#[derive(Clone, Debug, Copy, PartialEq)]
enum OffsetSizeBytes {
    One = 1,
    Two = 2,
    Three = 3,
    Four = 4,
}

impl OffsetSizeBytes {
    /// Build from the `offset_size_minus_one` bits (see spec).
    ///
    /// # Errors
    /// Returns `ArrowError::InvalidArgumentError` if the value is outside 0–3
    /// (i.e. cannot be produced by a well-formed 2-bit field).
    fn try_new(offset_size_minus_one: u8) -> Result<Self, ArrowError> {
        use OffsetSizeBytes::*;
        let result = match offset_size_minus_one {
            0 => One,
            1 => Two,
            2 => Three,
            3 => Four,
            _ => {
                return Err(ArrowError::InvalidArgumentError(
                    "offset_size_minus_one must be 0–3".to_string(),
                ))
            }
        };
        Ok(result)
    }

    /// Return one unsigned little-endian value from `bytes`.
    ///
    /// * `bytes` – the Variant-metadata buffer.
    /// * `byte_offset` – number of bytes to skip **before** reading the first
    ///   value (usually `1` to move past the header byte).
    /// * `offset_index` – 0-based index **after** the skip
    ///   (`0` is the first value, `1` the next, …).
    ///
    /// Each value is `self as usize` bytes wide (1, 2, 3 or 4).
    /// Three-byte values are zero-extended to 32 bits before the final
    /// fallible cast to `usize`.
    fn unpack_usize(
        &self,
        bytes: &[u8],
        byte_offset: usize,  // how many bytes to skip
        offset_index: usize, // which offset in an array of offsets
    ) -> Result<usize, ArrowError> {
        use OffsetSizeBytes::*;
        // Stride through the table: each entry is `*self as usize` bytes wide.
        let offset = byte_offset + (*self as usize) * offset_index;
        let result = match self {
            One => u8::from_le_bytes(array_from_slice(bytes, offset)?).into(),
            Two => u16::from_le_bytes(array_from_slice(bytes, offset)?).into(),
            Three => {
                // Let's grab the three byte le-chunk first
                let b3_chunks: [u8; 3] = array_from_slice(bytes, offset)?;
                // Let's pad it and construct a padded u32 from it.
                let mut buf = [0u8; 4];
                buf[..3].copy_from_slice(&b3_chunks);
                u32::from_le_bytes(buf)
                    .try_into()
                    .map_err(|e: TryFromIntError| ArrowError::InvalidArgumentError(e.to_string()))?
            }
            Four => u32::from_le_bytes(array_from_slice(bytes, offset)?)
                .try_into()
                .map_err(|e: TryFromIntError| ArrowError::InvalidArgumentError(e.to_string()))?,
        };
        Ok(result)
    }
}

/// Decoded form of the single-byte Variant metadata header.
#[derive(Clone, Debug, Copy, PartialEq)]
pub(crate) struct VariantMetadataHeader {
    version: u8,
    is_sorted: bool,
    /// Note: This is `offset_size_minus_one` + 1
    offset_size: OffsetSizeBytes,
}

// According to the spec this is currently always = 1, and so we store this const for validation
// purposes and to make that visible.
const CORRECT_VERSION_VALUE: u8 = 1;

impl VariantMetadataHeader {
    /// Tries to construct the variant metadata header, which has the form
    ///
    /// ```text
    ///              7     6  5   4  3             0
    ///             +-------+---+---+---------------+
    /// header      |       |   |   |    version    |
    ///             +-------+---+---+---------------+
    ///                 ^         ^
    ///                 |         +-- sorted_strings
    ///                 +-- offset_size_minus_one
    /// ```
    ///
    /// The version is a 4-bit value that must always contain the value 1.
    /// - sorted_strings is a 1-bit value indicating whether dictionary strings are sorted and unique.
    /// - offset_size_minus_one is a 2-bit value providing the number of bytes per dictionary size and offset field.
    /// - The actual number of bytes, offset_size, is offset_size_minus_one + 1
    ///
    /// # Errors
    /// Returns `ArrowError::InvalidArgumentError` if `bytes` is empty or if the
    /// version nibble is not `CORRECT_VERSION_VALUE`.
    pub fn try_new(bytes: &[u8]) -> Result<Self, ArrowError> {
        let header = first_byte_from_slice(bytes)?;

        let version = header & 0x0F; // First four bits
        if version != CORRECT_VERSION_VALUE {
            let err_msg = format!(
                "The version bytes in the header is not {CORRECT_VERSION_VALUE}, got {:b}",
                version
            );
            return Err(ArrowError::InvalidArgumentError(err_msg));
        }
        let is_sorted = (header & 0x10) != 0; // Fifth bit
        let offset_size_minus_one = header >> 6; // Last two bits
        Ok(Self {
            version,
            is_sorted,
            offset_size: OffsetSizeBytes::try_new(offset_size_minus_one)?,
        })
    }
}

#[derive(Clone, Copy, Debug, PartialEq)]
/// Encodes the Variant Metadata, see the Variant spec file for more information
pub struct VariantMetadata<'m> {
    bytes: &'m [u8],
    header: VariantMetadataHeader,
    dict_size: usize,
    // Byte position where the dictionary key data begins (i.e. just past the
    // header, the dictionary-size field, and the offset table).
    dictionary_key_start_byte: usize,
}

impl<'m> VariantMetadata<'m> {
    /// View the raw bytes (needed by very low-level decoders)
    #[inline]
    pub const fn as_bytes(&self) -> &'m [u8] {
        self.bytes
    }

    /// Parses and validates a Variant metadata buffer: header, dictionary
    /// size, buffer-length check, and offset-table monotonicity.
    pub fn try_new(bytes: &'m [u8]) -> Result<Self, ArrowError> {
        let header = VariantMetadataHeader::try_new(bytes)?;
        // Offset 1, index 0 because first element after header is dictionary size
        let dict_size = header.offset_size.unpack_usize(bytes, 1, 0)?;

        // Check that we have the correct metadata length according to dictionary_size, or return
        // error early.
        // Minimum number of bytes the metadata buffer must contain:
        // 1 byte header
        // + offset_size-byte `dictionary_size` field
        // + (dict_size + 1) offset entries, each `offset_size` bytes.
        // (Table size, essentially)
        // 1 + offset_size + (dict_size + 1) * offset_size
        let offset_size = header.offset_size as usize; // Cheap to copy

        // Computed with checked arithmetic so an adversarial dict_size cannot
        // overflow usize and defeat the length check below.
        let dictionary_key_start_byte = 1usize // 1-byte header
            .checked_add(offset_size) // 1 + offset_size
            .and_then(|p| {
                dict_size
                    .checked_add(1) // dict_size + 1
                    .and_then(|n| n.checked_mul(offset_size))
                    .and_then(|table_size| p.checked_add(table_size))
            })
            .ok_or_else(|| ArrowError::InvalidArgumentError("metadata length overflow".into()))?;
        if bytes.len() < dictionary_key_start_byte {
            return Err(ArrowError::InvalidArgumentError(
                "Metadata shorter than dictionary_size implies".to_string(),
            ));
        }

        // Check that all offsets are monotonically increasing
        // (offset entries start at table index 1; index 0 holds dict_size, hence `i + 1`).
        let mut prev = None;
        for (i, offset) in (0..=dict_size)
            .map(|i| header.offset_size.unpack_usize(bytes, 1, i + 1))
            .enumerate()
        {
            let offset = offset?;
            if i == 0 && offset != 0 {
                return Err(ArrowError::InvalidArgumentError(
                    "First offset is non-zero".to_string(),
                ));
            }
            if prev.is_some_and(|prev| prev >= offset) {
                return Err(ArrowError::InvalidArgumentError(
                    "Offsets are not monotonically increasing".to_string(),
                ));
            }
            prev = Some(offset);
        }

Review Comment: Thanks that's a neat way to do the first value check with `@` bind!

-- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. To unsubscribe, e-mail: github-unsubscr...@arrow.apache.org For queries about this service, please contact Infrastructure at: us...@infra.apache.org