alamb commented on code in PR #8694:
URL: https://github.com/apache/arrow-rs/pull/8694#discussion_r2498717158


##########
arrow-array/src/array/byte_view_array.rs:
##########
@@ -512,18 +512,71 @@ impl<T: ByteViewType + ?Sized> GenericByteViewArray<T> {
             };
         }
 
-        // 3) Allocate exactly capacity for all non-inline data
-        let mut data_buf = Vec::with_capacity(total_large);
+        struct GcCopyGroup {
+            total_buffer_bytes: usize,
+            total_len: usize,
+        }
+
+        let mut groups = vec![];
+        let one_group = [GcCopyGroup {
+            total_buffer_bytes: total_large,
+            total_len: len,
+        }];
+        let gc_copy_groups = if total_large > i32::MAX as usize {
+            // Slow-path: need to split into multiple copy groups
+            let mut current_length = 0;
+            let mut current_elements = 0;
+
+            for view in self.views() {
+                let len = *view as u32;
+                if len > MAX_INLINE_VIEW_LEN {
+                    if current_length + len > i32::MAX as u32 {
+                        // Start a new group
+                        groups.push(GcCopyGroup {
+                            total_buffer_bytes: current_length as usize,
+                            total_len: current_elements,
+                        });
+                        current_length = 0;
+                        current_elements = 0;
+                    }
+                    current_length += len;
+                    current_elements += 1;
+                }
+            }
+            if current_elements != 0 {
+                groups.push(GcCopyGroup {
+                    total_buffer_bytes: current_length as usize,
+                    total_len: current_elements,
+                });
+            }
+            &groups
+        } else {
+            one_group.as_slice()
+        };
+        debug_assert!(gc_copy_groups.len() <= i32::MAX as usize);
+
+        // 3) Copy the buffers group by group
+        let mut views_buf = Vec::with_capacity(len);
+        let mut data_blocks = Vec::with_capacity(gc_copy_groups.len());
+
+        let mut current_view_idx = 0;
+
+        for (group_idx, gc_copy_group) in gc_copy_groups.iter().enumerate() {
+            let mut data_buf = Vec::with_capacity(gc_copy_group.total_buffer_bytes);
 
-        // 4) Iterate over views and process each inline/non-inline view
-        let views_buf: Vec<u128> = (0..len)
-            .map(|i| unsafe { self.copy_view_to_buffer(i, &mut data_buf) })
-            .collect();
+            // Directly push views to avoid intermediate Vec allocation
+            for view_idx in current_view_idx..current_view_idx + gc_copy_group.total_len {
+                let view =
+                    unsafe { self.copy_view_to_buffer(view_idx, group_idx as i32, &mut data_buf) };
+                views_buf.push(view);

Review Comment:
   However, since both `Vec`s are being modified at the same time, I couldn't
figure out a way to use `collect` here -- though I could use `extend`.
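   For reference, a minimal sketch of what the `extend` variant could look
like (sketch only, untested; it reuses the names and the
`copy_view_to_buffer` signature from this diff):

   ```rust
   // `extend` consumes the iterator directly, so there is no intermediate
   // Vec, and `views_buf` can still reserve once from the iterator's
   // exact size hint instead of checking capacity on every `push`.
   views_buf.extend(
       (current_view_idx..current_view_idx + gc_copy_group.total_len).map(|view_idx| {
           // SAFETY: same invariants as the `push` loop above.
           unsafe { self.copy_view_to_buffer(view_idx, group_idx as i32, &mut data_buf) }
       }),
   );
   ```

   The borrow checker accepts this because the closure mutably captures
`data_buf` while `extend` is called on the separate `views_buf`.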



##########
arrow-array/src/array/byte_view_array.rs:
##########
@@ -512,18 +512,71 @@ impl<T: ByteViewType + ?Sized> GenericByteViewArray<T> {
             };
         }
 
-        // 3) Allocate exactly capacity for all non-inline data
-        let mut data_buf = Vec::with_capacity(total_large);
+        struct GcCopyGroup {
+            total_buffer_bytes: usize,
+            total_len: usize,
+        }
+
+        let mut groups = vec![];
+        let one_group = [GcCopyGroup {
+            total_buffer_bytes: total_large,
+            total_len: len,
+        }];
+        let gc_copy_groups = if total_large > i32::MAX as usize {
+            // Slow-path: need to split into multiple copy groups
+            let mut current_length = 0;
+            let mut current_elements = 0;
+
+            for view in self.views() {
+                let len = *view as u32;
+                if len > MAX_INLINE_VIEW_LEN {
+                    if current_length + len > i32::MAX as u32 {
+                        // Start a new group
+                        groups.push(GcCopyGroup {
+                            total_buffer_bytes: current_length as usize,
+                            total_len: current_elements,
+                        });
+                        current_length = 0;
+                        current_elements = 0;
+                    }
+                    current_length += len;
+                    current_elements += 1;
+                }
+            }
+            if current_elements != 0 {
+                groups.push(GcCopyGroup {
+                    total_buffer_bytes: current_length as usize,
+                    total_len: current_elements,
+                });
+            }
+            &groups
+        } else {
+            one_group.as_slice()
+        };
+        debug_assert!(gc_copy_groups.len() <= i32::MAX as usize);
+
+        // 3) Copy the buffers group by group
+        let mut views_buf = Vec::with_capacity(len);
+        let mut data_blocks = Vec::with_capacity(gc_copy_groups.len());
+
+        let mut current_view_idx = 0;
+
+        for (group_idx, gc_copy_group) in gc_copy_groups.iter().enumerate() {
+            let mut data_buf = Vec::with_capacity(gc_copy_group.total_buffer_bytes);
 
-        // 4) Iterate over views and process each inline/non-inline view
-        let views_buf: Vec<u128> = (0..len)
-            .map(|i| unsafe { self.copy_view_to_buffer(i, &mut data_buf) })
-            .collect();
+            // Directly push views to avoid intermediate Vec allocation
+            for view_idx in current_view_idx..current_view_idx + gc_copy_group.total_len {
+                let view =
+                    unsafe { self.copy_view_to_buffer(view_idx, group_idx as i32, &mut data_buf) };
+                views_buf.push(view);

Review Comment:
   I think using `collect` doesn't allocate additional memory, and it can be
faster than `push` because `collect` does the capacity check once, whereas
`push` has to check capacity on each call.
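   As a standalone illustration of that distinction (toy code, not from
this PR):

   ```rust
   // Both loops produce the same Vec, but `extend` (like `collect`) can
   // reserve once from the iterator's exact size hint, while the `push`
   // loop performs a capacity check on every call.
   fn main() {
       let n: u32 = 1_000_000;

       // Per-element `push`: a capacity branch on each iteration.
       let mut pushed: Vec<u32> = Vec::with_capacity(n as usize);
       for i in 0..n {
           pushed.push(i);
       }

       // `extend`: the standard library specializes this for exact-size
       // iterators, hoisting the reservation out of the loop.
       let mut extended: Vec<u32> = Vec::with_capacity(n as usize);
       extended.extend(0..n);

       assert_eq!(pushed, extended);
   }
   ```

   In practice the optimizer can often elide the `push` branch when the
capacity was reserved exactly up front, so the difference tends to show up
only in hot loops like this gc path.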



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]
