mapleFU commented on code in PR #37400:
URL: https://github.com/apache/arrow/pull/37400#discussion_r1843148451
##########
cpp/src/parquet/properties.h:
##########
@@ -564,6 +591,43 @@ class PARQUET_EXPORT WriterProperties {
return this->disable_statistics(path->ToDotString());
}
+ /// Disable bloom filter for the column specified by `path`.
+ /// Default disabled.
+ Builder* disable_bloom_filter(const std::string& path) {
+ bloom_filter_options_[path] = std::nullopt;
+ return this;
+ }
+
+ /// Disable bloom filter for the column specified by `path`.
+ /// Default enabled.
Review Comment:
That's my fault!
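For the record, a sketch of how the two doc comments presumably should pair up once fixed. The enable overload's signature is taken from the tests below in this PR; its body is an assumption mirroring the disable variant:

    /// Disable the bloom filter for the column specified by `path`.
    /// Bloom filters are disabled by default.
    Builder* disable_bloom_filter(const std::string& path) {
      bloom_filter_options_[path] = std::nullopt;
      return this;
    }

    /// Enable the bloom filter with the given options for the column
    /// specified by `path`. Bloom filters are disabled by default.
    Builder* enable_bloom_filter_options(const BloomFilterOptions& options,
                                         const std::string& path) {
      bloom_filter_options_[path] = options;  // assumed body, mirroring disable
      return this;
    }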
##########
cpp/src/parquet/arrow/arrow_reader_writer_test.cc:
##########
@@ -5723,5 +5731,213 @@ TEST_F(ParquetPageIndexRoundTripTest, EnablePerColumn) {
/*null_counts=*/{0}}));
}
+class ParquetBloomFilterRoundTripTest : public ::testing::Test,
+ public ParquetIndexRoundTripTest {
+ public:
+ void ReadBloomFilters(int expect_num_row_groups,
+ const std::set<int>& expect_columns_without_filter = {}) {
+ auto reader = ParquetFileReader::Open(std::make_shared<BufferReader>(buffer_));
+
+ auto metadata = reader->metadata();
+ ASSERT_EQ(expect_num_row_groups, metadata->num_row_groups());
+
+ auto& bloom_filter_reader = reader->GetBloomFilterReader();
+
+ for (int rg = 0; rg < metadata->num_row_groups(); ++rg) {
+ auto row_group_reader = bloom_filter_reader.RowGroup(rg);
+ ASSERT_NE(row_group_reader, nullptr);
+
+ for (int col = 0; col < metadata->num_columns(); ++col) {
+ bool expect_no_bloom_filter = expect_columns_without_filter.find(col) !=
+ expect_columns_without_filter.cend();
+
+ auto bloom_filter = row_group_reader->GetColumnBloomFilter(col);
+ if (expect_no_bloom_filter) {
+ ASSERT_EQ(nullptr, bloom_filter);
+ } else {
+ ASSERT_NE(nullptr, bloom_filter);
+ bloom_filters_.push_back(std::move(bloom_filter));
+ }
+ }
+ }
+ }
+
+ template <typename ArrowType>
+ void VerifyBloomFilter(const BloomFilter* bloom_filter,
+ const ::arrow::ChunkedArray& chunked_array) {
+ for (auto value : ::arrow::stl::Iterate<ArrowType>(chunked_array)) {
+ if (value == std::nullopt) {
+ continue;
+ }
+ EXPECT_TRUE(bloom_filter->FindHash(bloom_filter->Hash(value.value())));
+ }
+ }
+
+ protected:
+ std::vector<std::unique_ptr<BloomFilter>> bloom_filters_;
+};
+
+TEST_F(ParquetBloomFilterRoundTripTest, SimpleRoundTrip) {
+ auto schema = ::arrow::schema(
+ {::arrow::field("c0", ::arrow::int64()), ::arrow::field("c1", ::arrow::utf8())});
+ BloomFilterOptions options;
+ options.ndv = 100;
+ auto writer_properties = WriterProperties::Builder()
+ .enable_bloom_filter_options(options, "c0")
+ ->enable_bloom_filter_options(options, "c1")
+ ->max_row_group_length(4)
+ ->build();
+ auto table = ::arrow::TableFromJSON(schema, {R"([
+ [1, "a"],
+ [2, "b"],
+ [3, "c"],
+ [null, "d"],
+ [5, null],
+ [6, "f"]
+ ])"});
+ WriteFile(writer_properties, table);
+
+ ReadBloomFilters(/*expect_num_row_groups=*/2);
+ ASSERT_EQ(4, bloom_filters_.size());
+ std::vector<int64_t> row_group_row_count{4, 2};
+ int64_t current_row = 0;
+ int64_t bloom_filter_idx = 0; // current index in `bloom_filters_`
+ for (int64_t row_group_id = 0; row_group_id < 2; ++row_group_id) {
+ {
+ ASSERT_NE(nullptr, bloom_filters_[bloom_filter_idx]);
+ auto col = table->column(0)->Slice(current_row, row_group_row_count[row_group_id]);
+ VerifyBloomFilter<::arrow::Int64Type>(bloom_filters_[bloom_filter_idx].get(), *col);
+ ++bloom_filter_idx;
+ }
+ {
+ ASSERT_NE(nullptr, bloom_filters_[bloom_filter_idx]);
+ auto col = table->column(1)->Slice(current_row, row_group_row_count[row_group_id]);
+ VerifyBloomFilter<::arrow::StringType>(bloom_filters_[bloom_filter_idx].get(), *col);
+ ++bloom_filter_idx;
+ }
+ current_row += row_group_row_count[row_group_id];
+ }
+}
+
+TEST_F(ParquetBloomFilterRoundTripTest, SimpleRoundTripDictionary) {
+ auto origin_schema = ::arrow::schema(
+ {::arrow::field("c0", ::arrow::int64()), ::arrow::field("c1", ::arrow::utf8())});
+ auto schema = ::arrow::schema(
+ {::arrow::field("c0", ::arrow::dictionary(::arrow::int64(), ::arrow::int64())),
+ ::arrow::field("c1", ::arrow::dictionary(::arrow::int64(), ::arrow::utf8()))});
+ bloom_filters_.clear();
+ BloomFilterOptions options;
+ options.ndv = 100;
+ auto writer_properties = WriterProperties::Builder()
+ .enable_bloom_filter_options(options, "c0")
+ ->enable_bloom_filter_options(options, "c1")
+ ->max_row_group_length(4)
+ ->build();
+ std::vector<std::string> contents = {R"([
+ [1, "a"],
+ [2, "b"],
+ [3, "c"],
+ [null, "d"],
+ [5, null],
+ [6, "f"]
+ ])"};
+ auto table = ::arrow::TableFromJSON(schema, contents);
+ auto non_dict_table = ::arrow::TableFromJSON(origin_schema, contents);
Review Comment:
done
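(The hunk is truncated here. Presumably the test then writes `table`, reads the filters back, and verifies every non-null value of `non_dict_table` (the decoded values) against them, mirroring SimpleRoundTrip. A sketch of that assumed continuation:)

    WriteFile(writer_properties, table);
    ReadBloomFilters(/*expect_num_row_groups=*/2);
    ASSERT_EQ(4, bloom_filters_.size());
    // Per row group, slice the decoded columns and check membership,
    // e.g. for the first row group (rows 0..3):
    VerifyBloomFilter<::arrow::Int64Type>(
        bloom_filters_[0].get(), *non_dict_table->column(0)->Slice(0, 4));
    VerifyBloomFilter<::arrow::StringType>(
        bloom_filters_[1].get(), *non_dict_table->column(1)->Slice(0, 4));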
##########
cpp/src/parquet/bloom_filter.h:
##########
@@ -167,6 +167,31 @@ class PARQUET_EXPORT BloomFilter {
virtual ~BloomFilter() = default;
+ // Variants of Hash() taking a const reference argument, to facilitate use in
+ // template code.
+
+ /// Compute hash for ByteArray value by using its plain encoding result.
+ ///
+ /// @param value the value to hash.
+ uint64_t Hash(const ByteArray& value) const { return Hash(&value); }
Review Comment:
done
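(These const-reference overloads are what let template code like `VerifyBloomFilter` above hash a value without explicitly taking its address. A minimal sketch of the call-site shape they enable; `Contains` is a hypothetical helper, not part of this PR:)

    // Works uniformly for primitive values and ByteArray-like values:
    // the const-reference overload forwards to the pointer-taking one,
    // and string views convert implicitly to ByteArray.
    template <typename T>
    bool Contains(const parquet::BloomFilter& filter, const T& value) {
      return filter.FindHash(filter.Hash(value));
    }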
##########
cpp/src/parquet/bloom_filter_builder.cc:
##########
@@ -0,0 +1,158 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This module implements BloomFilterBuilder, which accumulates bloom filters
+// for the columns of a file while it is written and serializes them to the
+// output stream, reporting their locations via BloomFilterLocation.
+
+#include "parquet/bloom_filter_builder.h"
+
+#include <map>
+#include <utility>
+#include <vector>
+
+#include "arrow/io/interfaces.h"
+
+#include "parquet/bloom_filter.h"
+#include "parquet/exception.h"
+#include "parquet/metadata.h"
+#include "parquet/properties.h"
+
+namespace parquet::internal {
+
+namespace {
+/// Column encryption for bloom filters is not implemented yet.
+class BloomFilterBuilderImpl : public BloomFilterBuilder {
+ public:
+ explicit BloomFilterBuilderImpl(const SchemaDescriptor* schema,
+ const WriterProperties* properties)
+ : schema_(schema), properties_(properties) {}
+ /// Append a new row group to host all incoming bloom filters.
+ void AppendRowGroup() override;
+
+ BloomFilter* GetOrCreateBloomFilter(int32_t column_ordinal) override;
+
+ /// Serialize all bloom filters with header and bitset in the order of row group
+ /// and column id. The side effect is that it deletes all bloom filters after they
+ /// have been flushed.
+ void WriteTo(::arrow::io::OutputStream* sink, BloomFilterLocation* location) override;
+
+ BloomFilterBuilderImpl(const BloomFilterBuilderImpl&) = delete;
+ BloomFilterBuilderImpl(BloomFilterBuilderImpl&&) = default;
+
+ private:
+ /// Make sure the column ordinal is not out of bounds and the builder is in a
+ /// good state.
+ void CheckState(int32_t column_ordinal) const {
+ if (finished_) {
+ throw ParquetException("BloomFilterBuilder is already finished.");
+ }
+ if (column_ordinal < 0 || column_ordinal >= schema_->num_columns()) {
+ throw ParquetException("Invalid column ordinal: ", column_ordinal);
+ }
+ if (file_bloom_filters_.empty()) {
+ throw ParquetException("No row group appended to BloomFilterBuilder.");
+ }
+ if (schema_->Column(column_ordinal)->physical_type() == Type::BOOLEAN) {
+ throw ParquetException("BloomFilterBuilder does not support boolean
type.");
+ }
+ }
+
+ const SchemaDescriptor* schema_;
+ const WriterProperties* properties_;
+ bool finished_ = false;
+
+ using RowGroupBloomFilters = std::map<int32_t, std::unique_ptr<BloomFilter>>;
+ // Using unique_ptr because `std::unique_ptr<BloomFilter>` is not copyable.
+ // MSVC has the issue below: https://github.com/microsoft/STL/issues/1036
+ // So we use `std::unique_ptr<std::map<>>` to avoid the issue.
+ std::vector<std::unique_ptr<RowGroupBloomFilters>> file_bloom_filters_;
+};
+
+void BloomFilterBuilderImpl::AppendRowGroup() {
+ if (finished_) {
+ throw ParquetException(
+ "Cannot call AppendRowGroup() to finished BloomFilterBuilder.");
+ }
+ file_bloom_filters_.emplace_back(std::make_unique<RowGroupBloomFilters>());
+}
+
+BloomFilter* BloomFilterBuilderImpl::GetOrCreateBloomFilter(int32_t column_ordinal) {
+ CheckState(column_ordinal);
+ const ColumnDescriptor* column_descr = schema_->Column(column_ordinal);
+ DCHECK_NE(column_descr->physical_type(), Type::BOOLEAN);
+ auto bloom_filter_options_opt = properties_->bloom_filter_options(column_descr->path());
Review Comment:
Yes, `bloom_filter_options_opt` is lightweight, so copying it here is OK?
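(For context: `BloomFilterOptions` is presumably just a couple of scalars, so copying the `std::optional` out of the properties costs a few bytes per column chunk. Only `ndv` appears in this PR's tests; `fpp` is an assumed companion field:)

    #include <cstdint>

    // Assumed shape; copying this, or an std::optional of it, is trivial
    // next to the cost of allocating the filter's bitset.
    struct BloomFilterOptions {
      int32_t ndv;  // expected number of distinct values
      double fpp;   // target false-positive probability
    };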
##########
cpp/src/parquet/column_writer.cc:
##########
@@ -2363,12 +2390,153 @@ Status TypedColumnWriterImpl<FLBAType>::WriteArrowDense(
return Status::OK();
}
+template <typename DType>
+void TypedColumnWriterImpl<DType>::UpdateBloomFilter(const T* values,
+ int64_t num_values) {
+ if (bloom_filter_) {
+ std::array<uint64_t, kHashBatchSize> hashes;
+ for (int64_t i = 0; i < num_values; i += kHashBatchSize) {
+ int64_t current_hash_batch_size = std::min(kHashBatchSize, num_values - i);
+ bloom_filter_->Hashes(values + i, static_cast<int>(current_hash_batch_size),
+ hashes.data());
+ bloom_filter_->InsertHashes(hashes.data(),
+ static_cast<int>(current_hash_batch_size));
+ }
+ }
+}
+
+template <>
+void TypedColumnWriterImpl<FLBAType>::UpdateBloomFilter(const FLBA* values,
+ int64_t num_values) {
+ if (bloom_filter_) {
+ std::array<uint64_t, kHashBatchSize> hashes;
+ for (int64_t i = 0; i < num_values; i += kHashBatchSize) {
+ int64_t current_hash_batch_size = std::min(kHashBatchSize, num_values - i);
+ bloom_filter_->Hashes(values + i, descr_->type_length(),
+ static_cast<int>(current_hash_batch_size), hashes.data());
+ bloom_filter_->InsertHashes(hashes.data(),
+ static_cast<int>(current_hash_batch_size));
+ }
+ }
+}
+
+template <>
+void TypedColumnWriterImpl<BooleanType>::UpdateBloomFilter(const bool*, int64_t) {
+ DCHECK(bloom_filter_ == nullptr);
+}
+
+template <typename DType>
+void TypedColumnWriterImpl<DType>::UpdateBloomFilterSpaced(const T* values,
+ int64_t num_values,
+ const uint8_t* valid_bits,
+ int64_t valid_bits_offset) {
+ if (bloom_filter_) {
+ std::array<uint64_t, kHashBatchSize> hashes;
+ ::arrow::internal::VisitSetBitRunsVoid(
+ valid_bits, valid_bits_offset, num_values, [&](int64_t position, int64_t length) {
+ for (int64_t i = 0; i < length; i += kHashBatchSize) {
+ auto current_hash_batch_size = std::min(kHashBatchSize, length - i);
+ bloom_filter_->Hashes(values + i + position,
+ static_cast<int>(current_hash_batch_size),
+ hashes.data());
+ bloom_filter_->InsertHashes(hashes.data(),
+ static_cast<int>(current_hash_batch_size));
+ }
+ });
+ }
+}
+
+template <>
+void TypedColumnWriterImpl<BooleanType>::UpdateBloomFilterSpaced(const bool*, int64_t,
+ const uint8_t*, int64_t) {
+ DCHECK(bloom_filter_ == nullptr);
Review Comment:
done
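(The batching pattern above is worth a note: values are hashed into a fixed stack buffer at most kHashBatchSize at a time, then bulk-inserted, so no heap allocation is needed per call. A standalone sketch of the same loop shape; `UpdateFilter` is a hypothetical free function, and 256 is only an assumed value for the constant, whose real definition lives in column_writer.cc:)

    #include <algorithm>
    #include <array>
    #include <cstdint>

    #include "parquet/bloom_filter.h"

    constexpr int64_t kHashBatchSize = 256;  // assumed value

    void UpdateFilter(parquet::BloomFilter* filter, const int64_t* values,
                      int64_t num_values) {
      std::array<uint64_t, kHashBatchSize> hashes;
      for (int64_t i = 0; i < num_values; i += kHashBatchSize) {
        // Hash the next chunk into the stack buffer, then insert in bulk.
        const int64_t n = std::min(kHashBatchSize, num_values - i);
        filter->Hashes(values + i, static_cast<int>(n), hashes.data());
        filter->InsertHashes(hashes.data(), static_cast<int>(n));
      }
    }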
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]