mapleFU commented on code in PR #8694:
URL: https://github.com/apache/arrow-rs/pull/8694#discussion_r2506882184
##########
arrow-array/src/array/byte_view_array.rs:
##########
@@ -512,18 +512,85 @@ impl<T: ByteViewType + ?Sized> GenericByteViewArray<T> {
};
}
- // 3) Allocate exactly capacity for all non-inline data
- let mut data_buf = Vec::with_capacity(total_large);
+ let (views_buf, data_blocks) = if total_large < i32::MAX as usize {
+ // fast path, the entire data fits in a single buffer
+ // 3) Allocate exactly capacity for all non-inline data
+ let mut data_buf = Vec::with_capacity(total_large);
+
+ // 4) Iterate over views and process each inline/non-inline view
+ let views_buf: Vec<u128> = (0..len)
+ .map(|i| unsafe { self.copy_view_to_buffer(i, 0, &mut data_buf) })
+ .collect();
+ let data_block = Buffer::from_vec(data_buf);
+ let data_blocks = vec![data_block];
+ (views_buf, data_blocks)
+ } else {
+ // slow path, need to split into multiple buffers
+
+ struct GcCopyGroup {
+ total_buffer_bytes: usize,
+ total_len: usize,
+ }
+
+ impl GcCopyGroup {
+ fn new(total_buffer_bytes: u32, total_len: usize) -> Self {
+ Self {
+ total_buffer_bytes: total_buffer_bytes as usize,
+ total_len,
+ }
+ }
+ }
- // 4) Iterate over views and process each inline/non-inline view
- let views_buf: Vec<u128> = (0..len)
- .map(|i| unsafe { self.copy_view_to_buffer(i, &mut data_buf) })
- .collect();
+ let mut groups = Vec::with_capacity(total_large / (i32::MAX as usize) + 1);
Review Comment:
I think this might not be worth it, and the allocation count in this slow path might not matter anyway?
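For example, something like this (just a sketch, letting `groups` grow on demand):

```rust
// Sketch only: skip the capacity estimate; the few reallocations of this
// small Vec should be negligible next to the actual data copies below.
let mut groups: Vec<GcCopyGroup> = Vec::new();
```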
##########
arrow-array/src/array/byte_view_array.rs:
##########
@@ -512,18 +512,85 @@ impl<T: ByteViewType + ?Sized> GenericByteViewArray<T> {
};
}
- // 3) Allocate exactly capacity for all non-inline data
- let mut data_buf = Vec::with_capacity(total_large);
+ let (views_buf, data_blocks) = if total_large < i32::MAX as usize {
+ // fast path, the entire data fits in a single buffer
+ // 3) Allocate exactly capacity for all non-inline data
+ let mut data_buf = Vec::with_capacity(total_large);
+
+ // 4) Iterate over views and process each inline/non-inline view
+ let views_buf: Vec<u128> = (0..len)
+ .map(|i| unsafe { self.copy_view_to_buffer(i, 0, &mut data_buf) })
+ .collect();
+ let data_block = Buffer::from_vec(data_buf);
+ let data_blocks = vec![data_block];
+ (views_buf, data_blocks)
+ } else {
+ // slow path, need to split into multiple buffers
+
+ struct GcCopyGroup {
+ total_buffer_bytes: usize,
+ total_len: usize,
+ }
+
+ impl GcCopyGroup {
+ fn new(total_buffer_bytes: u32, total_len: usize) -> Self {
+ Self {
+ total_buffer_bytes: total_buffer_bytes as usize,
+ total_len,
+ }
+ }
+ }
- // 4) Iterate over views and process each inline/non-inline view
- let views_buf: Vec<u128> = (0..len)
- .map(|i| unsafe { self.copy_view_to_buffer(i, &mut data_buf) })
- .collect();
+ let mut groups = Vec::with_capacity(total_large / (i32::MAX as usize) + 1);
+ let mut current_length = 0;
+ let mut current_elements = 0;
+
+ for view in self.views() {
+ let len = *view as u32;
+ if len > MAX_INLINE_VIEW_LEN {
+ if current_length + len > i32::MAX as u32 {
+ // Start a new group
+ groups.push(GcCopyGroup::new(current_length, current_elements));
+ current_length = 0;
+ current_elements = 0;
+ }
+ current_length += len;
+ current_elements += 1;
+ }
+ }
+ if current_elements != 0 {
+ groups.push(GcCopyGroup::new(current_length, current_elements));
+ }
+ debug_assert!(groups.len() <= i32::MAX as usize);
+
+ // 3) Copy the buffers group by group
+ let mut views_buf = Vec::with_capacity(len);
+ let mut data_blocks = Vec::with_capacity(groups.len());
+
+ let mut current_view_idx = 0;
+
+ for (group_idx, gc_copy_group) in groups.iter().enumerate() {
+ let mut data_buf = Vec::with_capacity(gc_copy_group.total_buffer_bytes);
+
+ // Directly push views to avoid intermediate Vec allocation
+ let new_views = (current_view_idx..current_view_idx + gc_copy_group.total_len).map(
+ |view_idx| {
+ // safety: the view index came from iterating over valid range
+ unsafe {
+ self.copy_view_to_buffer(view_idx, group_idx as i32, &mut data_buf)
+ }
+ },
+ );
+ views_buf.extend(new_views);
+
+ data_blocks.push(Buffer::from_vec(data_buf));
+ current_view_idx += gc_copy_group.total_len;
+ }
+ (views_buf, data_blocks)
+ };
// 5) Wrap up buffers
Review Comment:
Should this step change as well?
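i.e. does the wrap-up still work when there is more than one data block? Roughly what I would expect it to look like (assuming the existing `new_unchecked` constructor, untested):

```rust
// Sketch: wrap up with however many data blocks were produced,
// instead of assuming exactly one.
let views = ScalarBuffer::from(views_buf);
// SAFETY: the views were rebuilt against `data_blocks` above.
unsafe { Self::new_unchecked(views, data_blocks, self.nulls().cloned()) }
```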
##########
arrow-array/src/array/byte_view_array.rs:
##########
@@ -512,18 +512,85 @@ impl<T: ByteViewType + ?Sized> GenericByteViewArray<T> {
};
}
- // 3) Allocate exactly capacity for all non-inline data
- let mut data_buf = Vec::with_capacity(total_large);
+ let (views_buf, data_blocks) = if total_large < i32::MAX as usize {
+ // fast path, the entire data fits in a single buffer
+ // 3) Allocate exactly capacity for all non-inline data
+ let mut data_buf = Vec::with_capacity(total_large);
+
+ // 4) Iterate over views and process each inline/non-inline view
+ let views_buf: Vec<u128> = (0..len)
+ .map(|i| unsafe { self.copy_view_to_buffer(i, 0, &mut data_buf) })
+ .collect();
+ let data_block = Buffer::from_vec(data_buf);
+ let data_blocks = vec![data_block];
+ (views_buf, data_blocks)
+ } else {
+ // slow path, need to split into multiple buffers
Review Comment:
Would it be better to extract this into a new function?
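Something along these lines, maybe (name and signature just illustrative):

```rust
/// Slow path of `gc`: split the non-inline data into multiple buffers,
/// each staying under `i32::MAX` bytes.
fn gc_into_chunked_buffers(&self, len: usize) -> (Vec<u128>, Vec<Buffer>) {
    // the grouping + per-group copy_view_to_buffer loop would move here
    todo!()
}
```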