mapleFU commented on code in PR #39818:
URL: https://github.com/apache/arrow/pull/39818#discussion_r1471340271


##########
cpp/src/parquet/column_reader.cc:
##########
@@ -1478,16 +1480,28 @@ class TypedRecordReader : public 
TypedColumnReaderImpl<DType>,
     // We skipped the levels by incrementing 'levels_position_'. For values
     // we do not have a buffer, so we need to read them and throw them away.
     // First we need to figure out how many present/not-null values there are.
-    std::shared_ptr<::arrow::ResizableBuffer> valid_bits;
-    valid_bits = AllocateBuffer(this->pool_);
-    
PARQUET_THROW_NOT_OK(valid_bits->Resize(bit_util::BytesForBits(skipped_records),
-                                            /*shrink_to_fit=*/true));
+    int64_t buffer_size = bit_util::BytesForBits(skipped_records);
+    if (valid_bits_for_skip_ == nullptr) {
+      // Preallocate kMaxSkipLevelBufferSize would help minimizing allocations.
+      valid_bits_for_skip_ = AllocateBuffer(
+          this->pool_, std::max<int64_t>(buffer_size, 
kMaxSkipLevelBufferSize));
+    } else if (buffer_size > kMaxSkipLevelBufferSize) {
+      // Increase the bitmap size.
+      PARQUET_THROW_NOT_OK(valid_bits_for_skip_->Resize(buffer_size,
+                                                        
/*shrink_to_fit=*/false));

Review Comment:
   This looks a bit odd — the buffer can never shrink back here?



##########
cpp/src/parquet/column_reader.cc:
##########
@@ -70,6 +70,8 @@ namespace {
 // The minimum number of repetition/definition levels to decode at a time, for
 // better vectorized performance when doing many smaller record reads
 constexpr int64_t kMinLevelBatchSize = 1024;
+// The max buffer size of validility bitmap for skipping buffered levels.
+constexpr int64_t kMaxSkipLevelBufferSize = 128;

Review Comment:
   The other parts LGTM. However, when the batch size is always greater than 1024,
wouldn't this optimization just waste memory?



##########
cpp/src/parquet/column_reader.cc:
##########
@@ -1478,16 +1480,28 @@ class TypedRecordReader : public 
TypedColumnReaderImpl<DType>,
     // We skipped the levels by incrementing 'levels_position_'. For values
     // we do not have a buffer, so we need to read them and throw them away.
     // First we need to figure out how many present/not-null values there are.
-    std::shared_ptr<::arrow::ResizableBuffer> valid_bits;
-    valid_bits = AllocateBuffer(this->pool_);
-    
PARQUET_THROW_NOT_OK(valid_bits->Resize(bit_util::BytesForBits(skipped_records),
-                                            /*shrink_to_fit=*/true));
+    int64_t buffer_size = bit_util::BytesForBits(skipped_records);
+    if (valid_bits_for_skip_ == nullptr) {
+      // Preallocate kMaxSkipLevelBufferSize would help minimizing allocations.
+      valid_bits_for_skip_ = AllocateBuffer(
+          this->pool_, std::max<int64_t>(buffer_size, 
kMaxSkipLevelBufferSize));
+    } else if (buffer_size > kMaxSkipLevelBufferSize) {
+      // Increase the bitmap size.
+      PARQUET_THROW_NOT_OK(valid_bits_for_skip_->Resize(buffer_size,
+                                                        
/*shrink_to_fit=*/false));
+    }
     ValidityBitmapInputOutput validity_io;
     validity_io.values_read_upper_bound = skipped_records;
-    validity_io.valid_bits = valid_bits->mutable_data();
+    validity_io.valid_bits = valid_bits_for_skip_->mutable_data();
     validity_io.valid_bits_offset = 0;
     DefLevelsToBitmap(def_levels() + start_levels_position, skipped_records,
                       this->leaf_info_, &validity_io);
+    if (buffer_size > kMaxSkipLevelBufferSize) {
+      // Shrink to kMaxSkipLevelBufferSize bytes per column in case there are 
numerous

Review Comment:
   🤔 So this is to prevent using too much memory when the schema has lots
of columns?



##########
cpp/src/parquet/column_reader.cc:
##########
@@ -1478,16 +1480,28 @@ class TypedRecordReader : public 
TypedColumnReaderImpl<DType>,
     // We skipped the levels by incrementing 'levels_position_'. For values
     // we do not have a buffer, so we need to read them and throw them away.
     // First we need to figure out how many present/not-null values there are.
-    std::shared_ptr<::arrow::ResizableBuffer> valid_bits;
-    valid_bits = AllocateBuffer(this->pool_);
-    
PARQUET_THROW_NOT_OK(valid_bits->Resize(bit_util::BytesForBits(skipped_records),
-                                            /*shrink_to_fit=*/true));
+    int64_t buffer_size = bit_util::BytesForBits(skipped_records);
+    if (valid_bits_for_skip_ == nullptr) {
+      // Preallocate kMaxSkipLevelBufferSize would help minimizing allocations.
+      valid_bits_for_skip_ = AllocateBuffer(
+          this->pool_, std::max<int64_t>(buffer_size, 
kMaxSkipLevelBufferSize));
+    } else if (buffer_size > kMaxSkipLevelBufferSize) {
+      // Increase the bitmap size.
+      PARQUET_THROW_NOT_OK(valid_bits_for_skip_->Resize(buffer_size,
+                                                        
/*shrink_to_fit=*/false));
+    }

Review Comment:
   Add a `DCHECK_GT(valid_bits_for_skip_->size(), buffer_size);` here?



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]

Reply via email to