[GitHub] [arrow] pitrou commented on a change in pull request #8219: ARROW-9603: [C++] Fix parquet write to not assume leaf-array validity bitmaps have the same values as parent structs

2020-09-25 Thread GitBox


pitrou commented on a change in pull request #8219:
URL: https://github.com/apache/arrow/pull/8219#discussion_r494170006



##
File path: cpp/src/parquet/column_writer.cc
##
@@ -1130,37 +1188,61 @@ class TypedColumnWriterImpl : public ColumnWriterImpl, public TypedColumnWriter<DType> {
     return values_to_write;
   }
 
+  void MaybeCalculateValidityBits(const int16_t* def_levels, int64_t batch_size,
+                                  int64_t* out_values_to_write,
+                                  int64_t* out_spaced_values_to_write,
+                                  int64_t* null_count) {
+    if (bits_buffer_ == nullptr) {
+      if (!level_info_.HasNullableValues()) {
+        *out_values_to_write = batch_size;
+        *out_spaced_values_to_write = batch_size;
+        *null_count = 0;
+      } else {
+        for (int x = 0; x < batch_size; x++) {
+          *out_values_to_write += def_levels[x] == level_info_.def_level ? 1 : 0;
+          *out_spaced_values_to_write +=
+              def_levels[x] >= level_info_.repeated_ancestor_def_level ? 1 : 0;
+        }
+        *null_count = *out_values_to_write - *out_spaced_values_to_write;
+      }
+      return;
+    }
+    // Shrink to fit possibly causes another allocation, and would only be necessary
+    // on the last batch.
+    int64_t new_bitmap_size = BitUtil::BytesForBits(batch_size);
+    if (new_bitmap_size != bits_buffer_->size()) {
+      PARQUET_THROW_NOT_OK(
+          bits_buffer_->Resize(new_bitmap_size, /*shrink_to_fit=*/false));
+      bits_buffer_->ZeroPadding();
+    }
+    internal::ValidityBitmapInputOutput io;
+    io.valid_bits = bits_buffer_->mutable_data();
+    io.values_read_upper_bound = batch_size;
+    internal::DefLevelsToBitmap(def_levels, batch_size, level_info_, &io);

Review comment:
   I see, thanks.
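
   For readers following the quoted hunk, the def-level counting it performs can be sketched standalone. The snippet below is illustrative only; `LevelInfoSketch` and `CountLeafValues` are hypothetical stand-ins for `parquet::internal::LevelInfo` and the loop above, not code from the PR.

   #include <cstdint>

   // Hypothetical stand-in for parquet::internal::LevelInfo.
   struct LevelInfoSketch {
     int16_t def_level;                    // def level of a present (non-null) leaf value
     int16_t repeated_ancestor_def_level;  // smallest def level at which a leaf slot exists
   };

   // Mirrors the counting loop in the quoted hunk: values_to_write counts leaf
   // values that are actually present, spaced_values_to_write counts every leaf
   // slot (present or null); the leaf null count is the number of slots minus
   // the number of present values.
   inline void CountLeafValues(const int16_t* def_levels, int64_t batch_size,
                               const LevelInfoSketch& info, int64_t* values_to_write,
                               int64_t* spaced_values_to_write, int64_t* null_count) {
     *values_to_write = 0;
     *spaced_values_to_write = 0;
     for (int64_t i = 0; i < batch_size; ++i) {
       *values_to_write += def_levels[i] == info.def_level ? 1 : 0;
       *spaced_values_to_write += def_levels[i] >= info.repeated_ancestor_def_level ? 1 : 0;
     }
     *null_count = *spaced_values_to_write - *values_to_write;
   }

   For example, with def_level == 2, repeated_ancestor_def_level == 1 and def levels {2, 1, 0, 2}, this yields values_to_write == 2, spaced_values_to_write == 3 and a null count of 1 (the level-0 entry contributes no leaf slot at all).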





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [arrow] pitrou commented on a change in pull request #8219: ARROW-9603: [C++] Fix parquet write to not assume leaf-array validity bitmaps have the same values as parent structs

2020-09-22 Thread GitBox


pitrou commented on a change in pull request #8219:
URL: https://github.com/apache/arrow/pull/8219#discussion_r492563521



##
File path: cpp/src/parquet/column_writer.cc
##
@@ -1130,37 +1188,61 @@ class TypedColumnWriterImpl : public ColumnWriterImpl, public TypedColumnWriter<DType> {
     return values_to_write;
   }
 
+  void MaybeCalculateValidityBits(const int16_t* def_levels, int64_t batch_size,
+                                  int64_t* out_values_to_write,
+                                  int64_t* out_spaced_values_to_write,
+                                  int64_t* null_count) {
+    if (bits_buffer_ == nullptr) {
+      if (!level_info_.HasNullableValues()) {
+        *out_values_to_write = batch_size;
+        *out_spaced_values_to_write = batch_size;
+        *null_count = 0;
+      } else {
+        for (int x = 0; x < batch_size; x++) {
+          *out_values_to_write += def_levels[x] == level_info_.def_level ? 1 : 0;
+          *out_spaced_values_to_write +=
+              def_levels[x] >= level_info_.repeated_ancestor_def_level ? 1 : 0;
+        }
+        *null_count = *out_values_to_write - *out_spaced_values_to_write;
+      }
+      return;
+    }
+    // Shrink to fit possibly causes another allocation, and would only be necessary
+    // on the last batch.
+    int64_t new_bitmap_size = BitUtil::BytesForBits(batch_size);
+    if (new_bitmap_size != bits_buffer_->size()) {
+      PARQUET_THROW_NOT_OK(
+          bits_buffer_->Resize(new_bitmap_size, /*shrink_to_fit=*/false));
+      bits_buffer_->ZeroPadding();
+    }
+    internal::ValidityBitmapInputOutput io;
+    io.valid_bits = bits_buffer_->mutable_data();
+    io.values_read_upper_bound = batch_size;
+    internal::DefLevelsToBitmap(def_levels, batch_size, level_info_, &io);

Review comment:
   Are we generating a new bitmap from the definition levels after we 
generated the definition levels from the existing bitmaps... just to decode the 
bitmap again in `WriteValuesSpaced`?
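
   For reference, that direction of the round trip can be sketched in isolation. The helper below is illustrative only: `LevelInfoSketch` and `DefLevelsToValidity` are hypothetical names standing in for `parquet::internal::LevelInfo` and `DefLevelsToBitmap`, and a `std::vector<bool>` stands in for the packed bitmap.

   #include <cstdint>
   #include <vector>

   // Hypothetical stand-in for parquet::internal::LevelInfo.
   struct LevelInfoSketch {
     int16_t def_level;                    // def level of a present (non-null) leaf value
     int16_t repeated_ancestor_def_level;  // smallest def level at which a leaf slot exists
   };

   // Rebuilds leaf validity from definition levels: every level at or above
   // repeated_ancestor_def_level corresponds to a leaf slot, and that slot is
   // valid only when the level reaches the leaf's own def level.
   std::vector<bool> DefLevelsToValidity(const int16_t* def_levels, int64_t num_levels,
                                         const LevelInfoSketch& info) {
     std::vector<bool> valid;
     for (int64_t i = 0; i < num_levels; ++i) {
       if (def_levels[i] >= info.repeated_ancestor_def_level) {
         valid.push_back(def_levels[i] == info.def_level);
       }
       // Lower levels describe empty or null ancestors and add no leaf slot.
     }
     return valid;
   }

   If the reading in the question is right, the write path would go leaf bitmap -> def levels -> bitmap again along these lines before the spaced write consumes it.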





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [arrow] pitrou commented on a change in pull request #8219: ARROW-9603: [C++] Fix parquet write to not assume leaf-array validity bitmaps have the same values as parent structs

2020-09-22 Thread GitBox


pitrou commented on a change in pull request #8219:
URL: https://github.com/apache/arrow/pull/8219#discussion_r492560707



##
File path: cpp/src/parquet/column_writer.cc
##
@@ -1009,12 +1046,33 @@ class TypedColumnWriterImpl : public ColumnWriterImpl, public TypedColumnWriter<DType> {
 
   Status WriteArrow(const int16_t* def_levels, const int16_t* rep_levels,
                     int64_t num_levels, const ::arrow::Array& array,
-                    ArrowWriteContext* ctx) override {
+                    ArrowWriteContext* ctx, bool nested, bool array_nullable) override {
+    BEGIN_PARQUET_CATCH_EXCEPTIONS
+    bool leaf_is_not_nullable = !level_info_.HasNullableValues();
+    // Leaf nulls are canonical when there is only a single null element and it is at the
+    // leaf.
+    bool leaf_nulls_are_canonical =
+        (level_info_.def_level == level_info_.repeated_ancestor_def_level + 1) &&
+        array_nullable;
+    bool maybe_parent_nulls =
+        nested && !(leaf_is_not_nullable || leaf_nulls_are_canonical);
+    if (maybe_parent_nulls) {
+      ARROW_ASSIGN_OR_RAISE(
+          bits_buffer_,
+          arrow::AllocateResizableBuffer(
+              BitUtil::BytesForBits(properties_->write_batch_size()), ctx->memory_pool));
+      bits_buffer_->ZeroPadding();
+      std::static_pointer_cast(AllocateBuffer(allocator_, 0));

Review comment:
   Is this allocating a new (temporary?) validity buffer for each write 
batch?
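
   One pattern the question seems to gesture at is allocating the scratch bitmap once and reusing it across batches. The sketch below is illustrative only; it uses a plain std::vector<uint8_t> where the PR uses arrow::ResizableBuffer, and `ScratchBitmap`/`PrepareForBatch` are made-up names.

   #include <algorithm>
   #include <cstdint>
   #include <vector>

   // Illustrative reuse pattern: the scratch validity bitmap is owned by the
   // writer and only grows; per-batch calls clear and reuse it instead of
   // allocating a fresh buffer each time.
   class ScratchBitmap {
    public:
     void PrepareForBatch(int64_t num_bits) {
       const int64_t num_bytes = (num_bits + 7) / 8;  // same rounding as BitUtil::BytesForBits
       if (num_bytes > static_cast<int64_t>(bytes_.size())) {
         bytes_.resize(static_cast<size_t>(num_bytes));  // grows at most a few times
       }
       std::fill(bytes_.begin(), bytes_.end(), 0);  // cheap reset, no reallocation
     }
     uint8_t* data() { return bytes_.data(); }

    private:
     std::vector<uint8_t> bytes_;  // lives for the writer's lifetime
   };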





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [arrow] pitrou commented on a change in pull request #8219: ARROW-9603: [C++] Fix parquet write to not assume leaf-array validity bitmaps have the same values as parent structs

2020-09-22 Thread GitBox


pitrou commented on a change in pull request #8219:
URL: https://github.com/apache/arrow/pull/8219#discussion_r492560172



##
File path: cpp/src/parquet/column_writer.cc
##
@@ -1009,12 +1046,33 @@ class TypedColumnWriterImpl : public ColumnWriterImpl, public TypedColumnWriter<DType> {
 
   Status WriteArrow(const int16_t* def_levels, const int16_t* rep_levels,
                     int64_t num_levels, const ::arrow::Array& array,
-                    ArrowWriteContext* ctx) override {
+                    ArrowWriteContext* ctx, bool nested, bool array_nullable) override {
+    BEGIN_PARQUET_CATCH_EXCEPTIONS
+    bool leaf_is_not_nullable = !level_info_.HasNullableValues();
+    // Leaf nulls are canonical when there is only a single null element and it is at the
+    // leaf.
+    bool leaf_nulls_are_canonical =
+        (level_info_.def_level == level_info_.repeated_ancestor_def_level + 1) &&
+        array_nullable;
+    bool maybe_parent_nulls =
+        nested && !(leaf_is_not_nullable || leaf_nulls_are_canonical);

Review comment:
   Wait, if `nested` is false, is all this complicated dance required?
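
   For what it's worth, the predicate in the quoted hunk does collapse when `nested` is false; the illustrative reduction below (a hypothetical free function, not the PR's code) makes the short-circuit explicit.

   // With nested == false the conjunction is false regardless of the other two
   // flags, so the parent-null bitmap machinery is skipped for flat columns.
   constexpr bool MaybeParentNulls(bool nested, bool leaf_is_not_nullable,
                                   bool leaf_nulls_are_canonical) {
     return nested && !(leaf_is_not_nullable || leaf_nulls_are_canonical);
   }

   static_assert(!MaybeParentNulls(/*nested=*/false, false, false),
                 "flat column never takes the parent-null path");
   static_assert(!MaybeParentNulls(/*nested=*/false, true, true),
                 "flat column never takes the parent-null path");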





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [arrow] pitrou commented on a change in pull request #8219: ARROW-9603: [C++] Fix parquet write to not assume leaf-array validity bitmaps have the same values as parent structs

2020-09-22 Thread GitBox


pitrou commented on a change in pull request #8219:
URL: https://github.com/apache/arrow/pull/8219#discussion_r492559517



##
File path: cpp/src/parquet/column_writer.cc
##
@@ -1009,12 +1046,33 @@ class TypedColumnWriterImpl : public ColumnWriterImpl, public TypedColumnWriter<DType> {
 
   Status WriteArrow(const int16_t* def_levels, const int16_t* rep_levels,
                     int64_t num_levels, const ::arrow::Array& array,
-                    ArrowWriteContext* ctx) override {
+                    ArrowWriteContext* ctx, bool nested, bool array_nullable) override {
+    BEGIN_PARQUET_CATCH_EXCEPTIONS
+    bool leaf_is_not_nullable = !level_info_.HasNullableValues();
+    // Leaf nulls are canonical when there is only a single null element and it is at the
+    // leaf.
+    bool leaf_nulls_are_canonical =
+        (level_info_.def_level == level_info_.repeated_ancestor_def_level + 1) &&
+        array_nullable;

Review comment:
   Perhaps rename to `parent_nullable` or `root_nullable` or...





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [arrow] pitrou commented on a change in pull request #8219: ARROW-9603: [C++] Fix parquet write to not assume leaf-array validity bitmaps have the same values as parent structs

2020-09-22 Thread GitBox


pitrou commented on a change in pull request #8219:
URL: https://github.com/apache/arrow/pull/8219#discussion_r492558836



##
File path: cpp/src/parquet/column_writer.cc
##
@@ -1009,12 +1046,33 @@ class TypedColumnWriterImpl : public ColumnWriterImpl, public TypedColumnWriter<DType> {
 
   Status WriteArrow(const int16_t* def_levels, const int16_t* rep_levels,
                     int64_t num_levels, const ::arrow::Array& array,
-                    ArrowWriteContext* ctx) override {
+                    ArrowWriteContext* ctx, bool nested, bool array_nullable) override {
+    BEGIN_PARQUET_CATCH_EXCEPTIONS
+    bool leaf_is_not_nullable = !level_info_.HasNullableValues();
+    // Leaf nulls are canonical when there is only a single null element and it is at the
+    // leaf.
+    bool leaf_nulls_are_canonical =
+        (level_info_.def_level == level_info_.repeated_ancestor_def_level + 1) &&
+        array_nullable;

Review comment:
   `array_nullable` refers to the parent, the root, the leaf? This is 
difficult to follow.





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [arrow] pitrou commented on a change in pull request #8219: ARROW-9603: [C++] Fix parquet write to not assume leaf-array validity bitmaps have the same values as parent structs

2020-09-22 Thread GitBox


pitrou commented on a change in pull request #8219:
URL: https://github.com/apache/arrow/pull/8219#discussion_r492559005



##
File path: cpp/src/parquet/column_writer.cc
##
@@ -1009,12 +1046,33 @@ class TypedColumnWriterImpl : public ColumnWriterImpl, public TypedColumnWriter<DType> {
 
   Status WriteArrow(const int16_t* def_levels, const int16_t* rep_levels,
                     int64_t num_levels, const ::arrow::Array& array,
-                    ArrowWriteContext* ctx) override {
+                    ArrowWriteContext* ctx, bool nested, bool array_nullable) override {
+    BEGIN_PARQUET_CATCH_EXCEPTIONS
+    bool leaf_is_not_nullable = !level_info_.HasNullableValues();

Review comment:
   Maybe avoid double negatives?
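
   One possible reading of the suggestion, shown as an illustrative rewrite rather than the change actually made in the PR: keep the flag in positive form and apply De Morgan's law at the use site.

   // Equivalent to nested && !(leaf_is_not_nullable || leaf_nulls_are_canonical)
   // with leaf_nullable == !leaf_is_not_nullable, but without the double negative.
   constexpr bool MaybeParentNulls(bool nested, bool leaf_nullable,
                                   bool leaf_nulls_are_canonical) {
     return nested && leaf_nullable && !leaf_nulls_are_canonical;
   }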





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org




[GitHub] [arrow] pitrou commented on a change in pull request #8219: ARROW-9603: [C++] Fix parquet write to not assume leaf-array validity bitmaps have the same values as parent structs

2020-09-22 Thread GitBox


pitrou commented on a change in pull request #8219:
URL: https://github.com/apache/arrow/pull/8219#discussion_r492558115



##
File path: cpp/src/parquet/column_writer.cc
##
@@ -1009,12 +1046,33 @@ class TypedColumnWriterImpl : public ColumnWriterImpl, public TypedColumnWriter<DType> {
 
   Status WriteArrow(const int16_t* def_levels, const int16_t* rep_levels,
                     int64_t num_levels, const ::arrow::Array& array,
-                    ArrowWriteContext* ctx) override {
+                    ArrowWriteContext* ctx, bool nested, bool array_nullable) override {
+    BEGIN_PARQUET_CATCH_EXCEPTIONS
+    bool leaf_is_not_nullable = !level_info_.HasNullableValues();
+    // Leaf nulls are canonical when there is only a single null element and it is at the

Review comment:
   "single nullable element" perhaps?





This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org