jhorstmann commented on code in PR #9022:
URL: https://github.com/apache/arrow-rs/pull/9022#discussion_r2638140741
##########
arrow-buffer/src/buffer/boolean.rs:
##########
@@ -212,6 +212,126 @@ impl BooleanBuffer {
        Some(BooleanBuffer::new(buffer, 0, len_in_bits))
    }
+    /// Create a new [`BooleanBuffer`] by applying the bitwise operation `op`
+    /// to the relevant bits from two input buffers.
+    ///
+    /// This function is faster than applying the operation bit by bit, as
+    /// it processes the input buffers in chunks of 64 bits (8 bytes) at a time.
+    ///
+    /// # Notes
+    /// See notes on [`Self::from_bitwise_unary_op`]
+    ///
+    /// # See Also
+    /// - [`BooleanBuffer::from_bitwise_unary_op`] for unary operations on a single input buffer.
+    /// - [`apply_bitwise_binary_op`](bit_util::apply_bitwise_binary_op) for in-place binary bitwise operations
+    ///
+    /// # Example: Create new [`BooleanBuffer`] from bitwise `AND` of two [`Buffer`]s
+    /// ```
+    /// # use arrow_buffer::{Buffer, BooleanBuffer};
+    /// let left = Buffer::from(vec![0b11001100u8, 0b10111010u8]); // 2 bytes = 16 bits
+    /// let right = Buffer::from(vec![0b10101010u8, 0b11011100u8, 0b11110000u8]); // 3 bytes = 24 bits
+    /// // AND of the first 12 bits
+    /// let result = BooleanBuffer::from_bitwise_binary_op(
+    ///     &left, 0, &right, 0, 12, |a, b| a & b
+    /// );
+    /// assert_eq!(result.inner().as_slice(), &[0b10001000u8, 0b00001000u8]);
+    /// ```
+    ///
+    /// # Example: Create new [`BooleanBuffer`] from bitwise `OR` of two byte slices
+    /// ```
+    /// # use arrow_buffer::BooleanBuffer;
+    /// let left = [0b11001100u8, 0b10111010u8];
+    /// let right = [0b10101010u8, 0b11011100u8];
+    /// // OR of bits 4..16 from left and bits 0..12 from right
+    /// let result = BooleanBuffer::from_bitwise_binary_op(
+    ///     &left, 4, &right, 0, 12, |a, b| a | b
+    /// );
+    /// assert_eq!(result.inner().as_slice(), &[0b10101110u8, 0b00001111u8]);
+    /// ```
+    pub fn from_bitwise_binary_op<F>(
+        left: impl AsRef<[u8]>,
+        left_offset_in_bits: usize,
+        right: impl AsRef<[u8]>,
+        right_offset_in_bits: usize,
+        len_in_bits: usize,
+        mut op: F,
+    ) -> Self
+    where
+        F: FnMut(u64, u64) -> u64,
+    {
+        // try fast path for aligned input
+        if left_offset_in_bits & 0x7 == 0 && right_offset_in_bits & 0x7 == 0 {
+            if let Some(result) = Self::try_from_aligned_bitwise_binary_op(
+                &left.as_ref()[left_offset_in_bits / 8..], // aligned to byte boundary
+                &right.as_ref()[right_offset_in_bits / 8..],
+                len_in_bits,
+                &mut op,
+            ) {
+                return result;
+            }
+        }
+        let left_chunks = BitChunks::new(left.as_ref(), left_offset_in_bits, len_in_bits);
+        let right_chunks = BitChunks::new(right.as_ref(), right_offset_in_bits, len_in_bits);
+
+        let chunks = left_chunks
+            .iter()
+            .zip(right_chunks.iter())
+            .map(|(left, right)| op(left, right));
+        // Soundness: `BitChunks` correctly reports its upper bound, so
+        // `chunks` is a trusted-length iterator
+        let mut buffer = unsafe { MutableBuffer::from_trusted_len_iter(chunks) };
+
+        let remainder_bytes = util::bit_util::ceil(left_chunks.remainder_len(), 8);
+        let rem = op(left_chunks.remainder_bits(), right_chunks.remainder_bits());
+        // bits are counted starting from the least significant bit, so `to_le_bytes` is correct
+        let rem = &rem.to_le_bytes()[0..remainder_bytes];
+        buffer.extend_from_slice(rem);
+
+        BooleanBuffer {
+            buffer: Buffer::from(buffer),
+            offset: 0,
+            len: len_in_bits,
+        }
+    }
+
+    /// Like [`Self::from_bitwise_binary_op`] but optimized for the case
+    /// where the inputs are aligned to byte boundaries
+    ///
+    /// Returns `None` if the inputs are not fully u64 aligned
+    fn try_from_aligned_bitwise_binary_op<F>(
+        left: &[u8],
+        right: &[u8],
+        len_in_bits: usize,
+        op: &mut F,
+    ) -> Option<Self>
+    where
+        F: FnMut(u64, u64) -> u64,
+    {
+        // Safety: all valid bytes are valid u64s
+        let (left_prefix, left_u64s, left_suffix) = unsafe { left.align_to::<u64>() };
Review Comment:
Do you know how much of a difference the full u64 alignment really makes on current target architectures? I think unaligned loads on x86 are not really slower; [maybe only if they cross a cache line](https://travisdowns.github.io/blog/2019/06/11/speed-limits.html#load-split-cache-lines), which for sequential 8-byte loads should happen only on every 8th load. With that assumption, using `slice::as_chunks` and `u64::from_le_bytes` on each chunk might be simpler, and would then only require handling a suffix, something like the sketch below.
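
An untested sketch of what I mean (the helper name and signature are made up for illustration; it assumes equal-length, byte-aligned inputs, and `slice::as_chunks` needs Rust 1.88+):

```rust
/// Hypothetical helper, not part of this PR: apply `op` on 8-byte chunks
/// without requiring u64 alignment. Assumes `left.len() == right.len()`.
fn binary_op_chunked<F>(left: &[u8], right: &[u8], mut op: F) -> Vec<u8>
where
    F: FnMut(u64, u64) -> u64,
{
    assert_eq!(left.len(), right.len());
    let (l_chunks, l_rem) = left.as_chunks::<8>();
    let (r_chunks, r_rem) = right.as_chunks::<8>();
    let mut out = Vec::with_capacity(left.len());
    // Unaligned 8-byte loads: cheap on x86 unless they split a cache line
    for (l, r) in l_chunks.iter().zip(r_chunks) {
        let v = op(u64::from_le_bytes(*l), u64::from_le_bytes(*r));
        out.extend_from_slice(&v.to_le_bytes());
    }
    // Only a byte suffix remains: widen to u64, apply `op`, truncate back
    if !l_rem.is_empty() {
        let (mut l, mut r) = ([0u8; 8], [0u8; 8]);
        l[..l_rem.len()].copy_from_slice(l_rem);
        r[..r_rem.len()].copy_from_slice(r_rem);
        let v = op(u64::from_le_bytes(l), u64::from_le_bytes(r));
        out.extend_from_slice(&v.to_le_bytes()[..l_rem.len()]);
    }
    out
}
```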
Staying with `align_to` and bailing on non-empty prefixes should also be fine; I would expect buffers to be aligned most of the time, but having a byte length that is a multiple of 8 might be less likely.
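
For that variant, roughly (again untested and hypothetical; assumes a little-endian target, matching the surrounding code's use of `to_le_bytes`, and the caller falls back to the `BitChunks` path on `None`):

```rust
/// Hypothetical helper, not the PR's code: bail only when an input does not
/// start on an 8-byte boundary; a trailing byte suffix is still handled here.
fn try_u64_aligned<F>(left: &[u8], right: &[u8], op: &mut F) -> Option<Vec<u64>>
where
    F: FnMut(u64, u64) -> u64,
{
    // Equal lengths plus empty prefixes imply equal suffix lengths
    assert_eq!(left.len(), right.len());
    // Safety: any 8 initialized bytes are a valid u64
    let (l_prefix, l_words, l_suffix) = unsafe { left.align_to::<u64>() };
    let (r_prefix, r_words, r_suffix) = unsafe { right.align_to::<u64>() };
    if !l_prefix.is_empty() || !r_prefix.is_empty() {
        return None;
    }
    let mut out: Vec<u64> = l_words.iter().zip(r_words).map(|(&l, &r)| op(l, r)).collect();
    // Widen any trailing byte suffix to a u64 and apply `op` once more
    if !l_suffix.is_empty() {
        let (mut l, mut r) = ([0u8; 8], [0u8; 8]);
        l[..l_suffix.len()].copy_from_slice(l_suffix);
        r[..r_suffix.len()].copy_from_slice(r_suffix);
        out.push(op(u64::from_le_bytes(l), u64::from_le_bytes(r)));
    }
    Some(out)
}
```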
Other than this nit, the code looks good to me :+1: