alamb commented on code in PR #9022:
URL: https://github.com/apache/arrow-rs/pull/9022#discussion_r2639949361


##########
arrow-buffer/src/buffer/boolean.rs:
##########
@@ -212,6 +212,126 @@ impl BooleanBuffer {
         Some(BooleanBuffer::new(buffer, 0, len_in_bits))
     }
 
+    /// Create a new [`BooleanBuffer`] by applying the bitwise operation `op` to
+    /// the relevant bits from two input buffers.
+    ///
+    /// This function is faster than applying the operation bit by bit as
+    /// it processes input buffers in chunks of 64 bits (8 bytes) at a time
+    ///
+    /// # Notes:
+    /// See notes on [`Self::from_bitwise_unary_op`]
+    ///
+    /// # See Also
+    /// - [`BooleanBuffer::from_bitwise_unary_op`] for unary operations on a single input buffer.
+    /// - [`apply_bitwise_binary_op`](bit_util::apply_bitwise_binary_op) for in-place binary bitwise operations
+    ///
+    /// # Example: Create new [`BooleanBuffer`] from bitwise `AND` of two [`Buffer`]s
+    /// ```
+    /// # use arrow_buffer::{Buffer, BooleanBuffer};
+    /// let left = Buffer::from(vec![0b11001100u8, 0b10111010u8]); // 2 bytes = 16 bits
+    /// let right = Buffer::from(vec![0b10101010u8, 0b11011100u8, 0b11110000u8]); // 3 bytes = 24 bits
+    /// // AND of the first 12 bits
+    /// let result = BooleanBuffer::from_bitwise_binary_op(
+    ///   &left, 0, &right, 0, 12, |a, b| a & b
+    /// );
+    /// assert_eq!(result.inner().as_slice(), &[0b10001000u8, 0b00001000u8]);
+    /// ```
+    ///
+    /// # Example: Create new [`BooleanBuffer`] from bitwise `OR` of two byte slices
+    /// ```
+    /// # use arrow_buffer::BooleanBuffer;
+    /// let left = [0b11001100u8, 0b10111010u8];
+    /// let right = [0b10101010u8, 0b11011100u8];
+    /// // OR of bits 4..16 from left and bits 0..12 from right
+    /// let result = BooleanBuffer::from_bitwise_binary_op(
+    ///  &left, 4, &right, 0, 12, |a, b| a | b
+    /// );
+    /// assert_eq!(result.inner().as_slice(), &[0b10101110u8, 0b00001111u8]);
+    /// ```
+    pub fn from_bitwise_binary_op<F>(
+        left: impl AsRef<[u8]>,
+        left_offset_in_bits: usize,
+        right: impl AsRef<[u8]>,
+        right_offset_in_bits: usize,
+        len_in_bits: usize,
+        mut op: F,
+    ) -> Self
+    where
+        F: FnMut(u64, u64) -> u64,
+    {
+        // try fast path for aligned input
+        if left_offset_in_bits & 0x7 == 0 && right_offset_in_bits & 0x7 == 0 {
+            if let Some(result) = Self::try_from_aligned_bitwise_binary_op(
+                &left.as_ref()[left_offset_in_bits / 8..], // aligned to byte boundary
+                &right.as_ref()[right_offset_in_bits / 8..],
+                len_in_bits,
+                &mut op,
+            ) {
+                return result;
+            }
+        }
+        let left_chunks = BitChunks::new(left.as_ref(), left_offset_in_bits, len_in_bits);
+        let right_chunks = BitChunks::new(right.as_ref(), right_offset_in_bits, len_in_bits);
+
+        let chunks = left_chunks
+            .iter()
+            .zip(right_chunks.iter())
+            .map(|(left, right)| op(left, right));
+        // Soundness: `BitChunks` is a trusted-length iterator which
+        // correctly reports its upper bound
+        let mut buffer = unsafe { MutableBuffer::from_trusted_len_iter(chunks) };
+
+        let remainder_bytes = util::bit_util::ceil(left_chunks.remainder_len(), 8);
+        let rem = op(left_chunks.remainder_bits(), right_chunks.remainder_bits());
+        // we are counting bits starting from the least significant bit, so to_le_bytes should be correct
+        let rem = &rem.to_le_bytes()[0..remainder_bytes];
+        buffer.extend_from_slice(rem);
+
+        BooleanBuffer {
+            buffer: Buffer::from(buffer),
+            offset: 0,
+            len: len_in_bits,
+        }
+    }
+
+    /// Like [`Self::from_bitwise_binary_op`] but optimized for the case where the
+    /// inputs are aligned to byte boundaries
+    ///
+    /// Returns `None` if the inputs are not fully u64 aligned
+    fn try_from_aligned_bitwise_binary_op<F>(
+        left: &[u8],
+        right: &[u8],
+        len_in_bits: usize,
+        op: &mut F,
+    ) -> Option<Self>
+    where
+        F: FnMut(u64, u64) -> u64,
+    {
+        // Safety: all valid bytes are valid u64s
+        let (left_prefix, left_u64s, left_suffix) = unsafe { left.align_to::<u64>() };

Review Comment:
   TLDR is I don't really know how much of a difference this makes, and I am struggling with measuring the difference.
   
   Right now, the benchmarks seem super sensitive to any change. For example, in the most recent version of this PR I simply moved the code and added the check for alignment, and this results in consistently reproducible slowdowns on some of the sliced benchmarks:
   
   ```
   and              1.00    211.5±1.92ns        ? ?/sec    1.30    275.5±4.61ns        ? ?/sec
   and_sliced_1     1.00   1095.6±3.62ns        ? ?/sec    1.12  1228.6±12.32ns        ? ?/sec
   and_sliced_24    1.39    337.9±3.92ns        ? ?/sec    1.00    243.4±1.74ns        ? ?/sec
   ```
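
   For context on the two code paths: as written above, the byte-aligned fast path is only attempted when both bit offsets are multiples of 8, so any sliced input whose offset is not byte aligned always falls back to the `BitChunks` path. A minimal sketch of how the paths get selected (just my reading of the diff, with made-up offsets; not a measured explanation of the numbers above):

   ```rust
   use arrow_buffer::BooleanBuffer;

   let left = vec![0u8; 128]; // 1024 bits
   let right = vec![0u8; 128];

   // 24-bit offsets are multiples of 8, so `try_from_aligned_bitwise_binary_op` is attempted
   let _aligned = BooleanBuffer::from_bitwise_binary_op(&left, 24, &right, 24, 512, |a, b| a & b);

   // a 1-bit offset is not byte aligned, so this always goes through the `BitChunks` fallback
   let _unaligned = BooleanBuffer::from_bitwise_binary_op(&left, 1, &right, 1, 512, |a, b| a & b);
   ```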
   
   > With that assumption, using slice::as_chunks and u64::from_le_bytes on each chunk might be simpler, and would then only require handling a suffix.
   
   
   I tried a version like this and will see how it performs
   
   ```rust
   
       /// Like [`Self::from_bitwise_binary_op`] but optimized for the case where the
       /// inputs are aligned to byte boundaries
       ///
       /// Returns `None` if the inputs are not fully u64 aligned
       fn try_from_aligned_bitwise_binary_op<F>(
           left: &[u8],
           right: &[u8],
           len_in_bits: usize,
           op: &mut F,
       ) -> Option<Self>
       where
           F: FnMut(u64, u64) -> u64,
       {
           // trim to length
           let len_in_bytes = ceil(len_in_bits, 8);
           let left = &left[0..len_in_bytes];
           let right = &right[0..len_in_bytes];
           // Safety: all valid bytes are valid u64s
           let (left_prefix, left_u64s, left_suffix) = unsafe { left.align_to::<u64>() };
           let (right_prefix, right_u64s, right_suffix) = unsafe { right.align_to::<u64>() };
           if left_prefix.is_empty()
               && right_prefix.is_empty()
               && left_suffix.is_empty()
               && right_suffix.is_empty()
           {
               // the buffers are word (64 bit) aligned, so use optimized Vec code.
               let result_u64s = left_u64s
                   .iter()
                   .zip(right_u64s.iter())
                   .map(|(l, r)| op(*l, *r))
                   .collect::<Vec<u64>>();
               Some(BooleanBuffer::new(
                   Buffer::from(result_u64s),
                   0,
                   len_in_bits,
               ))
           } else {
               let (left_slices, left_remainder) = left.as_chunks::<8>();
               let (right_slices, right_remainder) = right.as_chunks::<8>();
               debug_assert_eq!(left_slices.len(), right_slices.len());
               debug_assert_eq!(left_remainder.len(), right_remainder.len());
               let mut mutable_result =
                   MutableBuffer::with_capacity(left_slices.len() * 8 + left_remainder.len());
               mutable_result.extend_from_iter(
                   left_slices
                       .iter()
                       .zip(right_slices.iter())
                       .map(|(l, r)| op(u64::from_le_bytes(*l), u64::from_le_bytes(*r))),
               );
               if !left_remainder.is_empty() {
                   let rem = op(
                       u64::from_le_bytes({
                           let mut bytes = [0u8; 8];
                           bytes[..left_remainder.len()].copy_from_slice(left_remainder);
                           bytes
                       }),
                       u64::from_le_bytes({
                           let mut bytes = [0u8; 8];
                           bytes[..right_remainder.len()].copy_from_slice(right_remainder);
                           bytes
                       }),
                   );
                   println!("copying {} remainder bytes: {:?}", 
left_remainder.len(), &rem.to_le_bytes()[..left_remainder.len()]);
                   
mutable_result.extend_from_slice(&rem.to_le_bytes()[..left_remainder.len()]);
               }
               Some(BooleanBuffer {
                   buffer: Buffer::from(mutable_result),
                   offset: 0,
                   len: len_in_bits,
               })
           }
       }
   
   ```
   
   Sadly, it seems like we are not going to be able to use `as_chunks`
   
   ```text
   error: current MSRV (Minimum Supported Rust Version) is `1.85.0` but this item is stable since `1.88.0`
      --> arrow-buffer/src/buffer/boolean.rs:335:54
       |
   335 |             let (left_slices, left_remainder) = left.as_chunks::<8>();
       |                                                      ^^^^^^^^^^^^^^^^
       |
        = note: you may want to conditionally increase the MSRV considered by Clippy using the `clippy::msrv` attribute
        = help: for further information visit https://rust-lang.github.io/rust-clippy/rust-1.91.0/index.html#incompatible_msrv
       = note: `-D clippy::incompatible-msrv` implied by `-D warnings`
        = help: to override `-D warnings` add `#[allow(clippy::incompatible_msrv)]`
   
   
   ```
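
   One MSRV-friendly way to get essentially the same shape without `as_chunks` would be `chunks_exact(8)` (stable since 1.31) plus `try_into()` for the `[u8; 8]` conversion, with `ChunksExact::remainder()` for the tail. Rough, untested sketch (the `bitwise_chunks` helper name is made up for illustration):

   ```rust
   fn bitwise_chunks<F>(left: &[u8], right: &[u8], mut op: F) -> (Vec<u64>, u64)
   where
       F: FnMut(u64, u64) -> u64,
   {
       let mut left_chunks = left.chunks_exact(8);
       let mut right_chunks = right.chunks_exact(8);
       // full 8-byte chunks: `chunks_exact` yields `&[u8]` of length 8, which
       // `try_into` converts to `[u8; 8]` for `u64::from_le_bytes`
       let full: Vec<u64> = left_chunks
           .by_ref()
           .zip(right_chunks.by_ref())
           .map(|(l, r)| {
               op(
                   u64::from_le_bytes(l.try_into().unwrap()),
                   u64::from_le_bytes(r.try_into().unwrap()),
               )
           })
           .collect();
       // trailing bytes (< 8) are zero padded into a u64; a caller would only
       // append `remainder().len()` bytes of it, as in the snippet above
       let pad = |rem: &[u8]| {
           let mut bytes = [0u8; 8];
           bytes[..rem.len()].copy_from_slice(rem);
           u64::from_le_bytes(bytes)
       };
       let rem = op(pad(left_chunks.remainder()), pad(right_chunks.remainder()));
       (full, rem)
   }
   ```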
   


