wgtmac commented on code in PR #37400:
URL: https://github.com/apache/arrow/pull/37400#discussion_r1309005753
##########
cpp/src/parquet/properties.h:
##########
@@ -757,6 +844,23 @@ class PARQUET_EXPORT WriterProperties {
return false;
}
+ bool bloom_filter_enabled() const {
+ if (default_column_properties_.bloom_filter_enabled()) {
+ return true;
+ }
+ for (const auto& item : column_properties_) {
Review Comment:
nit: use `std::any_of`
##########
cpp/src/parquet/properties.h:
##########
@@ -757,6 +844,23 @@ class PARQUET_EXPORT WriterProperties {
return false;
}
+ bool bloom_filter_enabled() const {
+ if (default_column_properties_.bloom_filter_enabled()) {
Review Comment:
I don't think enabling the bloom filter for all columns makes sense in any case.
Should we remove this line?
##########
cpp/src/parquet/properties.h:
##########
@@ -186,6 +200,24 @@ class PARQUET_EXPORT ColumnProperties {
page_index_enabled_ = page_index_enabled;
}
+ void set_bloom_filter_options(std::optional<BloomFilterOptions>
bloom_filter_options) {
Review Comment:
It does not seem necessary to pass a `std::optional`. By default the bloom
filter is disabled, so we don't need to pass `std::nullopt`.
##########
cpp/src/parquet/file_writer.cc:
##########
@@ -494,6 +542,8 @@ class FileSerializer : public ParquetFileWriter::Contents {
std::unique_ptr<RowGroupWriter> row_group_writer_;
std::unique_ptr<PageIndexBuilder> page_index_builder_;
std::unique_ptr<InternalFileEncryptor> file_encryptor_;
+ // Only one of the bloom filter writer is active at a time
Review Comment:
I don't get the point of this comment.
##########
cpp/src/parquet/bloom_filter_builder.cc:
##########
@@ -0,0 +1,142 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This module defines an abstract interface for iterating through pages in a
+// Parquet column chunk within a row group. It could be extended in the future
+// to iterate through all data pages in all chunks in a file.
+
+#include "parquet/bloom_filter_builder.h"
+
+#include <map>
+#include <utility>
+#include <vector>
+
+#include "arrow/io/interfaces.h"
+
+#include "parquet/bloom_filter.h"
+#include "parquet/exception.h"
+#include "parquet/metadata.h"
+#include "parquet/properties.h"
+
+namespace parquet {
+
+class BloomFilterBuilderImpl : public BloomFilterBuilder {
+ public:
+ explicit BloomFilterBuilderImpl(const SchemaDescriptor* schema,
+ WriterProperties properties)
+ : schema_(schema), properties_(std::move(properties)) {}
+ /// Append a new row group to host all incoming bloom filters.
+ void AppendRowGroup() override;
+
+ BloomFilter* GetOrCreateBloomFilter(
+ int32_t column_ordinal, const BloomFilterOptions& bloom_filter_options)
override;
+
+ /// Serialize all bloom filters with header and bitset in the order of row
group and
+ /// column id. Column encryption is not implemented yet. The side effect is
that it
+ /// deletes all bloom filters after they have been flushed.
+ void WriteTo(::arrow::io::OutputStream* sink, BloomFilterLocation* location)
override;
+
+ void Finish() override { finished_ = true; }
+
+ private:
+ /// Make sure column ordinal is not out of bound and the builder is in good
state.
+ void CheckState(int32_t column_ordinal) const {
+ if (finished_) {
+ throw ParquetException("BloomFilterBuilder is already finished.");
+ }
+ if (column_ordinal < 0 || column_ordinal >= schema_->num_columns()) {
+ throw ParquetException("Invalid column ordinal: ", column_ordinal);
+ }
+ if (row_group_bloom_filters_.empty()) {
+ throw ParquetException("No row group appended to BloomFilterBuilder.");
+ }
+ if (schema_->Column(column_ordinal)->physical_type() == Type::BOOLEAN) {
+ throw ParquetException("BloomFilterBuilder not supports Boolean.");
+ }
+ }
+
+ const SchemaDescriptor* schema_;
+ WriterProperties properties_;
+ bool finished_ = false;
+
+ // vector: row_group_ordinal
+ // map: column_ordinal -> bloom filter
Review Comment:
What about using an alias that is self-descriptive?
##########
cpp/src/parquet/metadata.h:
##########
@@ -508,21 +507,32 @@ class PARQUET_EXPORT RowGroupMetaDataBuilder {
std::unique_ptr<RowGroupMetaDataBuilderImpl> impl_;
};
+/// Alias type of page index and bloom filter location of a row group. The
index
+/// location is located by column ordinal. If the column does not have the
index,
+/// its value is set to std::nullopt.
+using RowGroupIndexLocation = std::vector<std::optional<IndexLocation>>;
+
+/// Alias type of page index and bloom filter location of a parquet file. The
+/// index location is located by the row group ordinal.
+using FileIndexLocation = std::map<size_t, RowGroupIndexLocation>;
+
/// \brief Public struct for location to all page indexes in a parquet file.
struct PageIndexLocation {
- /// Alias type of page index location of a row group. The index location
- /// is located by column ordinal. If the column does not have the page index,
- /// its value is set to std::nullopt.
- using RowGroupIndexLocation = std::vector<std::optional<IndexLocation>>;
- /// Alias type of page index location of a parquet file. The index location
- /// is located by the row group ordinal.
- using FileIndexLocation = std::map<size_t, RowGroupIndexLocation>;
/// Row group column index locations which uses row group ordinal as the key.
FileIndexLocation column_index_location;
/// Row group offset index locations which uses row group ordinal as the key.
FileIndexLocation offset_index_location;
};
+/// \brief Public struct for location to all bloom filters in a parquet file.
+struct BloomFilterLocation {
+ /// Row group bloom filter index locations which uses row group ordinal as
the key.
+ ///
+ /// Note: Before Parquet 2.10, the bloom filter index only have "offset".
But here
+ /// we use "IndexLocation" with length to support the future extension.
+ FileIndexLocation bloom_filter_location;
Review Comment:
Is it overkill to use `FileIndexLocation`? The assumption for the page index
is that usually all columns will create one, therefore a
`vector<optional<IndexLocation>>` is used. However, usually not all columns
will create a bloom filter, so using a vector here may waste some space.
##########
cpp/src/parquet/properties.h:
##########
@@ -186,6 +200,24 @@ class PARQUET_EXPORT ColumnProperties {
page_index_enabled_ = page_index_enabled;
}
+ void set_bloom_filter_options(std::optional<BloomFilterOptions>
bloom_filter_options) {
+ if (bloom_filter_options) {
+ if (bloom_filter_options->fpp > 1.0 || bloom_filter_options->fpp < 0.0) {
+ throw ParquetException(
+ "Bloom Filter False positive probability must be between 0.0 and
1.0");
+ }
+ }
+ bloom_filter_options_ = bloom_filter_options;
+ }
+
+ void set_bloom_filter_enabled(bool bloom_filter_enabled) {
Review Comment:
Can we merge this with the above one?
##########
cpp/src/parquet/bloom_filter_builder.cc:
##########
@@ -0,0 +1,142 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This module defines an abstract interface for iterating through pages in a
+// Parquet column chunk within a row group. It could be extended in the future
+// to iterate through all data pages in all chunks in a file.
+
+#include "parquet/bloom_filter_builder.h"
+
+#include <map>
+#include <utility>
+#include <vector>
+
+#include "arrow/io/interfaces.h"
+
+#include "parquet/bloom_filter.h"
+#include "parquet/exception.h"
+#include "parquet/metadata.h"
+#include "parquet/properties.h"
+
+namespace parquet {
+
+class BloomFilterBuilderImpl : public BloomFilterBuilder {
+ public:
+ explicit BloomFilterBuilderImpl(const SchemaDescriptor* schema,
+ WriterProperties properties)
Review Comment:
```suggestion
const WriterProperties& properties)
```
##########
cpp/src/parquet/CMakeLists.txt:
##########
@@ -335,7 +336,7 @@ install(FILES
"${CMAKE_CURRENT_BINARY_DIR}/parquet_version.h"
add_parquet_test(internals-test
SOURCES
bloom_filter_test.cc
- bloom_filter_reader_test.cc
+ bloom_filter_parquet_test.cc
Review Comment:
What about merging `bloom_filter_parquet_test.cc` into
`bloom_filter_test.cc`?
##########
cpp/src/parquet/file_writer.cc:
##########
@@ -360,6 +384,10 @@ class FileSerializer : public ParquetFileWriter::Contents {
}
row_group_writer_.reset();
+ // In Parquet standard, the Bloom filter data can be stored before the
page indexes
+ // after all row groups or stored between row groups. We choose to store
it before
+ // the page indexes after all row groups.
Review Comment:
We can also say parquet-mr does the same thing.
##########
cpp/src/parquet/file_writer.cc:
##########
@@ -484,6 +516,22 @@ class FileSerializer : public ParquetFileWriter::Contents {
}
}
+ void WriteBloomFilter() {
+ if (bloom_filter_builder_ != nullptr) {
+ if (properties_->file_encryption_properties()) {
+ throw ParquetException("Encryption is not supported with bloom
filter");
Review Comment:
```suggestion
ParquetException::NYI("Encryption is not supported with bloom
filter");
```
##########
cpp/src/parquet/file_writer.cc:
##########
@@ -307,7 +321,17 @@ class RowGroupSerializer : public RowGroupWriter::Contents
{
auto codec_options = properties_->codec_options(path)
? (properties_->codec_options(path)).get()
: nullptr;
-
+ BloomFilter* bloom_filter = nullptr;
Review Comment:
These lines duplicate line 162 above; can we consolidate them into the
`GetOrCreateBloomFilter` call?
##########
cpp/src/parquet/bloom_filter_builder.cc:
##########
@@ -0,0 +1,142 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This module defines an abstract interface for iterating through pages in a
+// Parquet column chunk within a row group. It could be extended in the future
+// to iterate through all data pages in all chunks in a file.
+
+#include "parquet/bloom_filter_builder.h"
+
+#include <map>
+#include <utility>
+#include <vector>
+
+#include "arrow/io/interfaces.h"
+
+#include "parquet/bloom_filter.h"
+#include "parquet/exception.h"
+#include "parquet/metadata.h"
+#include "parquet/properties.h"
+
+namespace parquet {
+
+class BloomFilterBuilderImpl : public BloomFilterBuilder {
+ public:
+ explicit BloomFilterBuilderImpl(const SchemaDescriptor* schema,
+ WriterProperties properties)
+ : schema_(schema), properties_(std::move(properties)) {}
+ /// Append a new row group to host all incoming bloom filters.
+ void AppendRowGroup() override;
+
+ BloomFilter* GetOrCreateBloomFilter(
+ int32_t column_ordinal, const BloomFilterOptions& bloom_filter_options)
override;
+
+ /// Serialize all bloom filters with header and bitset in the order of row
group and
+ /// column id. Column encryption is not implemented yet. The side effect is
that it
+ /// deletes all bloom filters after they have been flushed.
+ void WriteTo(::arrow::io::OutputStream* sink, BloomFilterLocation* location)
override;
+
+ void Finish() override { finished_ = true; }
+
+ private:
+ /// Make sure column ordinal is not out of bound and the builder is in good
state.
+ void CheckState(int32_t column_ordinal) const {
+ if (finished_) {
+ throw ParquetException("BloomFilterBuilder is already finished.");
+ }
+ if (column_ordinal < 0 || column_ordinal >= schema_->num_columns()) {
+ throw ParquetException("Invalid column ordinal: ", column_ordinal);
+ }
+ if (row_group_bloom_filters_.empty()) {
+ throw ParquetException("No row group appended to BloomFilterBuilder.");
+ }
+ if (schema_->Column(column_ordinal)->physical_type() == Type::BOOLEAN) {
+ throw ParquetException("BloomFilterBuilder not supports Boolean.");
Review Comment:
```suggestion
throw ParquetException("BloomFilterBuilder does not support boolean
type.");
```
##########
cpp/src/parquet/bloom_filter.h:
##########
@@ -167,6 +167,12 @@ class PARQUET_EXPORT BloomFilter {
virtual ~BloomFilter() = default;
+ // Variant of const pointer argument to facilitate template
Review Comment:
Move these lines up to line 108? Then it looks more consistent with other
`uint64_t Hash(const T* value) const` overloads. FWIW, switching to const
reference looks better and I think it is OK to introduce minor breaking change
as the bloom filter feature is not complete.
##########
cpp/src/parquet/bloom_filter_builder.cc:
##########
@@ -0,0 +1,142 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This module defines an abstract interface for iterating through pages in a
+// Parquet column chunk within a row group. It could be extended in the future
+// to iterate through all data pages in all chunks in a file.
+
+#include "parquet/bloom_filter_builder.h"
+
+#include <map>
+#include <utility>
+#include <vector>
+
+#include "arrow/io/interfaces.h"
+
+#include "parquet/bloom_filter.h"
+#include "parquet/exception.h"
+#include "parquet/metadata.h"
+#include "parquet/properties.h"
+
+namespace parquet {
+
+class BloomFilterBuilderImpl : public BloomFilterBuilder {
+ public:
+ explicit BloomFilterBuilderImpl(const SchemaDescriptor* schema,
+ WriterProperties properties)
+ : schema_(schema), properties_(std::move(properties)) {}
+ /// Append a new row group to host all incoming bloom filters.
+ void AppendRowGroup() override;
+
+ BloomFilter* GetOrCreateBloomFilter(
+ int32_t column_ordinal, const BloomFilterOptions& bloom_filter_options)
override;
+
+ /// Serialize all bloom filters with header and bitset in the order of row
group and
+ /// column id. Column encryption is not implemented yet. The side effect is
that it
+ /// deletes all bloom filters after they have been flushed.
+ void WriteTo(::arrow::io::OutputStream* sink, BloomFilterLocation* location)
override;
+
+ void Finish() override { finished_ = true; }
+
+ private:
+ /// Make sure column ordinal is not out of bound and the builder is in good
state.
+ void CheckState(int32_t column_ordinal) const {
+ if (finished_) {
+ throw ParquetException("BloomFilterBuilder is already finished.");
+ }
+ if (column_ordinal < 0 || column_ordinal >= schema_->num_columns()) {
+ throw ParquetException("Invalid column ordinal: ", column_ordinal);
+ }
+ if (row_group_bloom_filters_.empty()) {
+ throw ParquetException("No row group appended to BloomFilterBuilder.");
+ }
+ if (schema_->Column(column_ordinal)->physical_type() == Type::BOOLEAN) {
+ throw ParquetException("BloomFilterBuilder not supports Boolean.");
+ }
+ }
+
+ const SchemaDescriptor* schema_;
+ WriterProperties properties_;
+ bool finished_ = false;
+
+ // vector: row_group_ordinal
+ // map: column_ordinal -> bloom filter
+ std::vector<std::map<int32_t, std::unique_ptr<BloomFilter>>>
row_group_bloom_filters_;
+};
+
+std::unique_ptr<BloomFilterBuilder> BloomFilterBuilder::Make(
+ const SchemaDescriptor* schema, const WriterProperties& properties) {
+ return std::unique_ptr<BloomFilterBuilder>(
+ new BloomFilterBuilderImpl(schema, properties));
+}
+
+void BloomFilterBuilderImpl::AppendRowGroup() {
row_group_bloom_filters_.emplace_back(); }
+
+BloomFilter* BloomFilterBuilderImpl::GetOrCreateBloomFilter(
+ int32_t column_ordinal, const BloomFilterOptions& bloom_filter_options) {
+ CheckState(column_ordinal);
+ std::unique_ptr<BloomFilter>& bloom_filter =
+ row_group_bloom_filters_.back()[column_ordinal];
+ if (bloom_filter == nullptr) {
+ auto block_split_bloom_filter =
+ std::make_unique<BlockSplitBloomFilter>(properties_.memory_pool());
+ block_split_bloom_filter->Init(BlockSplitBloomFilter::OptimalNumOfBytes(
+ bloom_filter_options.ndv, bloom_filter_options.fpp));
+ bloom_filter = std::move(block_split_bloom_filter);
+ }
+ return bloom_filter.get();
+}
+
+void BloomFilterBuilderImpl::WriteTo(::arrow::io::OutputStream* sink,
+ BloomFilterLocation* location) {
+ if (!finished_) {
+ throw ParquetException("Cannot call WriteTo() to unfinished
PageIndexBuilder.");
+ }
+ if (row_group_bloom_filters_.empty()) {
+ // Return quickly if there is no bloom filter
+ return;
+ }
+
+ for (size_t row_group_ordinal = 0; row_group_ordinal <
row_group_bloom_filters_.size();
+ ++row_group_ordinal) {
+ const auto& row_group_bloom_filters =
row_group_bloom_filters_[row_group_ordinal];
+ // the whole row group has no bloom filter
+ if (row_group_bloom_filters.empty()) {
+ continue;
+ }
+ bool has_valid_bloom_filter = false;
+ int num_columns = schema_->num_columns();
+ std::vector<std::optional<IndexLocation>> locations(num_columns,
std::nullopt);
+
+ // serialize bloom filter by ascending order of column id
Review Comment:
```suggestion
// serialize bloom filter in ascending order of column id
```
##########
cpp/src/parquet/file_writer.cc:
##########
@@ -484,6 +516,22 @@ class FileSerializer : public ParquetFileWriter::Contents {
}
}
+ void WriteBloomFilter() {
+ if (bloom_filter_builder_ != nullptr) {
+ if (properties_->file_encryption_properties()) {
+ throw ParquetException("Encryption is not supported with bloom
filter");
+ }
+ // Serialize page index after all row groups have been written and report
+ // location to the file metadata.
+ BloomFilterLocation bloom_filter_location;
+ bloom_filter_builder_->Finish();
+ bloom_filter_builder_->WriteTo(sink_.get(), &bloom_filter_location);
+ metadata_->SetBloomFilterLocation(bloom_filter_location);
+ // Release the memory for BloomFilter.
+ // bloom_filter_builder_ = nullptr;
Review Comment:
Why not?
Actually I think we'd better place bloom filters between row groups so we
can proactively release the memory of serialized row groups as early as
possible. But that can be a future optimization.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]