alamb commented on code in PR #9058:
URL: https://github.com/apache/arrow-datafusion/pull/9058#discussion_r1470295968
##########
datafusion/execution/src/memory_pool/proxy.rs:
##########
@@ -36,24 +55,37 @@ pub trait VecAllocExt {
/// Note this calculation is not recursive, and does not include any heap
/// allocations contained within the Vec's elements. Does not include the
/// size of `self`
+ ///
+ /// # Example:
+ /// ```
+ /// # use datafusion_execution::memory_pool::proxy::VecAllocExt;
+ /// let mut vec = Vec::new();
+ /// // Push data into the vec and the accounting will be updated to reflect
+ /// // memory allocation
+ /// vec.push(1);
+ /// assert_eq!(vec.allocated_size(), 16); // 16 bytes allocated: space for 4 i32s
+ /// vec.push(1);
+ /// assert_eq!(vec.allocated_size(), 16); // no new allocation needed
+ ///
+ /// // push more data into the vec
+ /// for _ in 0..10 { vec.push(1); }
+ /// assert_eq!(vec.allocated_size(), 64); // 64 bytes allocated: space for 16 i32s now
+ /// ```
fn allocated_size(&self) -> usize;
}
impl<T> VecAllocExt for Vec<T> {
type T = T;
fn push_accounted(&mut self, x: Self::T, accounting: &mut usize) {
- if self.capacity() == self.len() {
- // allocate more
-
- // growth factor: 2, but at least 2 elements
- let bump_elements = (self.capacity() * 2).max(2);
- let bump_size = std::mem::size_of::<u32>() * bump_elements;
- self.reserve(bump_elements);
+ let prev_capacty = self.capacity();
+ self.push(x);
+ let new_capacity = self.capacity();
+ if new_capacity > prev_capacty {
+ // capacity changed, so we allocated more
+ let bump_size = (new_capacity - prev_capacty) *
std::mem::size_of::<T>();
Review Comment:
I think that is a good point. I will add a comment to clarify
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]