tustvold commented on code in PR #4305:
URL: https://github.com/apache/arrow-rs/pull/4305#discussion_r1210125275


##########
object_store/src/lib.rs:
##########
@@ -1237,8 +1249,8 @@ mod tests {
         assert_eq!(bytes_expected, bytes_written);
 
         // Can overwrite some storage
-        // Sizes carefully chosen to exactly hit min limit of 5 MiB
-        let data = get_vec_of_bytes(242_880, 22);
+        // Sizes chozen to ensure we write three parts

Review Comment:
   ```suggestion
           // Sizes chosen to ensure we write three parts
   ```



##########
object_store/src/multipart.rs:
##########
@@ -158,13 +170,18 @@ where
         // Poll current tasks
         self.as_mut().poll_tasks(cx)?;
 
-        // If adding buf to pending buffer would trigger send, check
-        // whether we have capacity for another task.
-        let enough_to_send =
-            (buf.len() + self.current_buffer.len()) >= self.min_part_size;
-        if enough_to_send && self.tasks.len() < self.max_concurrency {
-            // If we do, copy into the buffer and submit the task, and return ready.
-            self.current_buffer.extend_from_slice(buf);
+        let mut offset = 0;
+
+        loop {
+            // Fill up current buffer
+            offset += self.as_mut().add_to_buffer(buf, offset);
+
+            // If we don't have a full buffer or we have too many tasks, break
+            if self.current_buffer.len() < self.part_size
+                || self.tasks.len() >= self.max_concurrency
+            {
+                break;
+            }
 
             let out_buffer = std::mem::take(&mut self.current_buffer);

Review Comment:
   ```suggestion
            let out_buffer = std::mem::replace(&mut self.current_buffer, Vec::with_capacity(self.part_size));
   ```
   Perhaps
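
   For context (not part of the suggestion itself), a minimal standalone sketch of the difference: `std::mem::take` swaps in `Vec::default()`, leaving a buffer with zero capacity that has to reallocate as the next part is accumulated, whereas `std::mem::replace` with `Vec::with_capacity` leaves an already-sized allocation behind. The names below (`part_size`, `current_buffer`) are just stand-ins for `self.part_size` and `self.current_buffer`:

   ```rust
   // Standalone sketch, not the actual multipart.rs code.
   fn main() {
       let part_size = 5 * 1024 * 1024;

       // `std::mem::take` leaves `Vec::default()` behind: zero capacity,
       // so filling the next part reallocates incrementally.
       let mut current_buffer: Vec<u8> = Vec::with_capacity(part_size);
       let _out_buffer = std::mem::take(&mut current_buffer);
       assert_eq!(current_buffer.capacity(), 0);

       // `std::mem::replace` lets the caller choose what is left behind,
       // so the next part is accumulated into a pre-sized allocation.
       let mut current_buffer: Vec<u8> = Vec::with_capacity(part_size);
       let _out_buffer =
           std::mem::replace(&mut current_buffer, Vec::with_capacity(part_size));
       assert!(current_buffer.capacity() >= part_size);
   }
   ```

   The trade-off is that the pre-allocated `part_size` buffer stays resident between writes even when the stream is idle.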


