emkornfield commented on a change in pull request #8219: URL: https://github.com/apache/arrow/pull/8219#discussion_r492835461
########## File path: cpp/src/parquet/column_writer.cc ########## @@ -1130,37 +1188,61 @@ class TypedColumnWriterImpl : public ColumnWriterImpl, public TypedColumnWriter< return values_to_write; } + void MaybeCalculateValidityBits(const int16_t* def_levels, int64_t batch_size, + int64_t* out_values_to_write, + int64_t* out_spaced_values_to_write, + int64_t* null_count) { + if (bits_buffer_ == nullptr) { + if (!level_info_.HasNullableValues()) { + *out_values_to_write = batch_size; + *out_spaced_values_to_write = batch_size; + *null_count = 0; + } else { + for (int x = 0; x < batch_size; x++) { + *out_values_to_write += def_levels[x] == level_info_.def_level ? 1 : 0; + *out_spaced_values_to_write += + def_levels[x] >= level_info_.repeated_ancestor_def_level ? 1 : 0; + } + *null_count = *out_values_to_write - *out_spaced_values_to_write; + } + return; + } + // Shrink to fit possibly causes another allocation, and would only be necessary + // on the last batch. + int64_t new_bitmap_size = BitUtil::BytesForBits(batch_size); + if (new_bitmap_size != bits_buffer_->size()) { + PARQUET_THROW_NOT_OK( + bits_buffer_->Resize(new_bitmap_size, /*shrink_to_fit=*/false)); + bits_buffer_->ZeroPadding(); + } + internal::ValidityBitmapInputOutput io; + io.valid_bits = bits_buffer_->mutable_data(); + io.values_read_upper_bound = batch_size; + internal::DefLevelsToBitmap(def_levels, batch_size, level_info_, &io); Review comment: You understand correctly. I didn't want to push def/rep levels further down the stack but that is a possibility. ---------------------------------------------------------------- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. For queries about this service, please contact Infrastructure at: us...@infra.apache.org