wgtmac commented on code in PR #37400:
URL: https://github.com/apache/arrow/pull/37400#discussion_r1569005583
##########
cpp/src/parquet/bloom_filter_builder.h:
##########
@@ -0,0 +1,84 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include "arrow/io/type_fwd.h"
+#include "parquet/types.h"
+
+namespace parquet {
+
+class BloomFilter;
+class SchemaDescriptor;
+struct BloomFilterOptions;
+struct BloomFilterLocation;
+
+/// \brief Interface for collecting bloom filter of a parquet file.
+///
+/// ```

Review Comment:
   Do we need these docstrings? This interface is not supposed to be used by downstream users.
##########
cpp/src/parquet/column_writer.cc:
##########
@@ -2363,12 +2390,151 @@ Status TypedColumnWriterImpl<FLBAType>::WriteArrowDense(
   return Status::OK();
 }
 
+template <typename DType>
+void TypedColumnWriterImpl<DType>::UpdateBloomFilter(const T* values,
+                                                     int64_t num_values) {
+  if (bloom_filter_) {
+    std::array<uint64_t, kHashBatchSize> hashes;
+    for (int64_t i = 0; i < num_values; i += kHashBatchSize) {
+      int64_t current_hash_batch_size = std::min(kHashBatchSize, num_values - i);
+      bloom_filter_->Hashes(values, static_cast<int>(current_hash_batch_size),
+                            hashes.data());
+      bloom_filter_->InsertHashes(hashes.data(),
+                                  static_cast<int>(current_hash_batch_size));
+    }
+  }
+}
+
+template <>
+void TypedColumnWriterImpl<FLBAType>::UpdateBloomFilter(const FLBA* values,
+                                                        int64_t num_values) {
+  if (bloom_filter_) {
+    std::array<uint64_t, kHashBatchSize> hashes;
+    for (int64_t i = 0; i < num_values; i += kHashBatchSize) {
+      int64_t current_hash_batch_size = std::min(kHashBatchSize, num_values - i);
+      bloom_filter_->Hashes(values, descr_->type_length(),
+                            static_cast<int>(current_hash_batch_size), hashes.data());
+      bloom_filter_->InsertHashes(hashes.data(),
+                                  static_cast<int>(current_hash_batch_size));
+    }
+  }
+}
+
+template <>
+void TypedColumnWriterImpl<BooleanType>::UpdateBloomFilter(const bool*, int64_t) {
+  DCHECK(bloom_filter_ == nullptr);
+}
+
+template <typename DType>
+void TypedColumnWriterImpl<DType>::UpdateBloomFilterSpaced(const T* values,
+                                                           int64_t num_values,
+                                                           const uint8_t* valid_bits,
+                                                           int64_t valid_bits_offset) {
+  if (bloom_filter_) {
+    std::array<uint64_t, kHashBatchSize> hashes;
+    ::arrow::internal::VisitSetBitRunsVoid(
+        valid_bits, valid_bits_offset, num_values, [&](int64_t position, int64_t length) {
+          for (int64_t i = 0; i < length; i += kHashBatchSize) {
+            auto current_hash_batch_size = std::min(kHashBatchSize, length - i);
+            bloom_filter_->Hashes(values + i + position,
+                                  static_cast<int>(current_hash_batch_size),
+                                  hashes.data());
+            bloom_filter_->InsertHashes(hashes.data(),
+                                        static_cast<int>(current_hash_batch_size));
+          }
+        });
+  }
+}
+
+template <>
+void TypedColumnWriterImpl<BooleanType>::UpdateBloomFilterSpaced(const bool*, int64_t,
+                                                                 const uint8_t*,
+                                                                 int64_t) {
+  DCHECK(bloom_filter_ == nullptr);
+}
+
+template <>
+void TypedColumnWriterImpl<FLBAType>::UpdateBloomFilterSpaced(const FLBA* values,
+                                                              int64_t num_values,
+                                                              const uint8_t* valid_bits,
+                                                              int64_t valid_bits_offset) {
+  if (bloom_filter_) {
+    std::array<uint64_t, kHashBatchSize> hashes;
+    ::arrow::internal::VisitSetBitRunsVoid(
+        valid_bits, valid_bits_offset, num_values, [&](int64_t position, int64_t length) {
+          for (int64_t i = 0; i < length; i += kHashBatchSize) {
+            auto current_hash_batch_size = std::min(kHashBatchSize, length - i);
+            bloom_filter_->Hashes(values + i + position, descr_->type_length(),
+                                  static_cast<int>(current_hash_batch_size),
+                                  hashes.data());
+            bloom_filter_->InsertHashes(hashes.data(),
+                                        static_cast<int>(current_hash_batch_size));
+          }
+        });
+  }
+}
+
+template <typename ArrayType>
+void UpdateBinaryBloomFilter(BloomFilter* bloom_filter, const ArrayType& array) {
+  // Using a smaller size because an extra `byte_arrays` are used.
+  constexpr int64_t kBinaryHashBatchSize = 64;
+  std::array<ByteArray, kBinaryHashBatchSize> byte_arrays;
+  std::array<uint64_t, kBinaryHashBatchSize> hashes;
+  int hashes_idx = 0;
+  auto flush_hashes = [&]() {
+    DCHECK_NE(0, hashes_idx);
+    bloom_filter->Hashes(byte_arrays.data(), static_cast<int>(hashes_idx), hashes.data());
+    bloom_filter->InsertHashes(hashes.data(), static_cast<int>(hashes_idx));
+    hashes_idx = 0;
+  };
+  PARQUET_THROW_NOT_OK(::arrow::VisitArraySpanInline<typename ArrayType::TypeClass>(
+      *array.data(),
+      [&](const std::string_view& view) {

Review Comment:
   ```suggestion
         [&](std::string_view view) {
   ```
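A standalone sketch of the point behind the suggestion, not the PR's code: `std::string_view` is just a pointer plus a length, so passing it by value in the visitor lambda is as cheap as passing a reference and avoids an extra level of indirection. The `VisitStrings` helper below is hypothetical and only exists for this illustration.

```cpp
#include <cstddef>
#include <iostream>
#include <string_view>
#include <vector>

// Hypothetical visitor helper, for illustration only.
template <typename Visit>
void VisitStrings(const std::vector<std::string_view>& values, Visit&& visit) {
  for (std::string_view v : values) {  // by value: two words, trivially copyable
    visit(v);
  }
}

int main() {
  std::vector<std::string_view> values = {"parquet", "bloom", "filter"};
  std::size_t total_bytes = 0;
  VisitStrings(values, [&](std::string_view view) {  // matches the suggested signature
    total_bytes += view.size();
  });
  std::cout << "total bytes: " << total_bytes << "\n";
  return 0;
}
```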
##########
cpp/src/parquet/column_writer.cc:
##########
@@ -2363,12 +2390,151 @@ Status TypedColumnWriterImpl<FLBAType>::WriteArrowDense(
   return Status::OK();
 }
 
+template <typename DType>
+void TypedColumnWriterImpl<DType>::UpdateBloomFilter(const T* values,
+                                                     int64_t num_values) {
+  if (bloom_filter_) {
+    std::array<uint64_t, kHashBatchSize> hashes;
+    for (int64_t i = 0; i < num_values; i += kHashBatchSize) {
+      int64_t current_hash_batch_size = std::min(kHashBatchSize, num_values - i);
+      bloom_filter_->Hashes(values, static_cast<int>(current_hash_batch_size),
+                            hashes.data());
+      bloom_filter_->InsertHashes(hashes.data(),
+                                  static_cast<int>(current_hash_batch_size));
+    }
+  }
+}
+
+template <>
+void TypedColumnWriterImpl<FLBAType>::UpdateBloomFilter(const FLBA* values,
+                                                        int64_t num_values) {
+  if (bloom_filter_) {
+    std::array<uint64_t, kHashBatchSize> hashes;
+    for (int64_t i = 0; i < num_values; i += kHashBatchSize) {
+      int64_t current_hash_batch_size = std::min(kHashBatchSize, num_values - i);
+      bloom_filter_->Hashes(values, descr_->type_length(),
+                            static_cast<int>(current_hash_batch_size), hashes.data());
+      bloom_filter_->InsertHashes(hashes.data(),
+                                  static_cast<int>(current_hash_batch_size));
+    }
+  }
+}
+
+template <>
+void TypedColumnWriterImpl<BooleanType>::UpdateBloomFilter(const bool*, int64_t) {
+  DCHECK(bloom_filter_ == nullptr);
+}
+
+template <typename DType>
+void TypedColumnWriterImpl<DType>::UpdateBloomFilterSpaced(const T* values,
+                                                           int64_t num_values,
+                                                           const uint8_t* valid_bits,
+                                                           int64_t valid_bits_offset) {
+  if (bloom_filter_) {
+    std::array<uint64_t, kHashBatchSize> hashes;
+    ::arrow::internal::VisitSetBitRunsVoid(
+        valid_bits, valid_bits_offset, num_values, [&](int64_t position, int64_t length) {
+          for (int64_t i = 0; i < length; i += kHashBatchSize) {
+            auto current_hash_batch_size = std::min(kHashBatchSize, length - i);
+            bloom_filter_->Hashes(values + i + position,
+                                  static_cast<int>(current_hash_batch_size),
+                                  hashes.data());
+            bloom_filter_->InsertHashes(hashes.data(),
+                                        static_cast<int>(current_hash_batch_size));
+          }
+        });
+  }
+}
+
+template <>
+void TypedColumnWriterImpl<BooleanType>::UpdateBloomFilterSpaced(const bool*, int64_t,
+                                                                 const uint8_t*,
+                                                                 int64_t) {
+  DCHECK(bloom_filter_ == nullptr);
+}
+
+template <>
+void TypedColumnWriterImpl<FLBAType>::UpdateBloomFilterSpaced(const FLBA* values,
+                                                              int64_t num_values,
+                                                              const uint8_t* valid_bits,
+                                                              int64_t valid_bits_offset) {
+  if (bloom_filter_) {
+    std::array<uint64_t, kHashBatchSize> hashes;
+    ::arrow::internal::VisitSetBitRunsVoid(
+        valid_bits, valid_bits_offset, num_values, [&](int64_t position, int64_t length) {
+          for (int64_t i = 0; i < length; i += kHashBatchSize) {
+            auto current_hash_batch_size = std::min(kHashBatchSize, length - i);
+            bloom_filter_->Hashes(values + i + position, descr_->type_length(),
+                                  static_cast<int>(current_hash_batch_size),
+                                  hashes.data());
+            bloom_filter_->InsertHashes(hashes.data(),
+                                        static_cast<int>(current_hash_batch_size));
+          }
+        });
+  }
+}
+
+template <typename ArrayType>
+void UpdateBinaryBloomFilter(BloomFilter* bloom_filter, const ArrayType& array) {
+  // Using a smaller size because an extra `byte_arrays` are used.
+  constexpr int64_t kBinaryHashBatchSize = 64;
+  std::array<ByteArray, kBinaryHashBatchSize> byte_arrays;
+  std::array<uint64_t, kBinaryHashBatchSize> hashes;
+  int hashes_idx = 0;
+  auto flush_hashes = [&]() {
+    DCHECK_NE(0, hashes_idx);
+    bloom_filter->Hashes(byte_arrays.data(), static_cast<int>(hashes_idx), hashes.data());
+    bloom_filter->InsertHashes(hashes.data(), static_cast<int>(hashes_idx));
+    hashes_idx = 0;
+  };
+  PARQUET_THROW_NOT_OK(::arrow::VisitArraySpanInline<typename ArrayType::TypeClass>(
+      *array.data(),
+      [&](const std::string_view& view) {
+        if (hashes_idx == kHashBatchSize) {
+          flush_hashes();
+        }
+        byte_arrays[hashes_idx] = view;
+        ++hashes_idx;
+        return Status::OK();
+      },
+      []() { return Status::OK(); }));
+  if (hashes_idx != 0) {
+    flush_hashes();
+  }
+}
+
+template <>
+void TypedColumnWriterImpl<ByteArrayType>::UpdateBloomFilterArray(
+    const ::arrow::Array& values) {
+  if (bloom_filter_) {
+    if (!::arrow::is_base_binary_like(values.type_id())) {
+      throw ParquetException("Only BaseBinaryArray and subclasses supported");
+    }
+
+    if (::arrow::is_binary_like(values.type_id())) {
+      UpdateBinaryBloomFilter(bloom_filter_,
+                              checked_cast<const ::arrow::BinaryArray&>(values));
+    } else {
+      DCHECK(::arrow::is_large_binary_like(values.type_id()));

Review Comment:
   Is it still safe to use DCHECK here? Arrow now has a string-view array type, and this may easily break once we add support for it here.
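A rough sketch of one way the concern above could be addressed; this is not the PR's code, just a fragment meant to slot into the `UpdateBloomFilterArray` specialization shown in the hunk. It dispatches explicitly on the binary-like kinds and throws for anything else, so a release build fed an unsupported array (e.g. a future string-view array) fails loudly instead of hashing through a bad cast. `LargeBinaryArray` and `type()->ToString()` come from Arrow's public API; everything else is taken from the hunk above.

```cpp
    if (::arrow::is_binary_like(values.type_id())) {
      UpdateBinaryBloomFilter(bloom_filter_,
                              checked_cast<const ::arrow::BinaryArray&>(values));
    } else if (::arrow::is_large_binary_like(values.type_id())) {
      UpdateBinaryBloomFilter(bloom_filter_,
                              checked_cast<const ::arrow::LargeBinaryArray&>(values));
    } else {
      // Reject unsupported array kinds even when DCHECK is compiled out.
      throw ParquetException("Bloom filter update does not support array type: " +
                             values.type()->ToString());
    }
```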
##########
cpp/src/parquet/bloom_filter_builder.h:
##########
@@ -0,0 +1,72 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+// This module defines an abstract interface for iterating through pages in a
+// Parquet column chunk within a row group. It could be extended in the future
+// to iterate through all data pages in all chunks in a file.
+
+#pragma once
+
+#include "arrow/io/interfaces.h"
+#include "parquet/types.h"
+
+namespace parquet {
+
+class BloomFilter;
+class SchemaDescriptor;
+struct BloomFilterOptions;
+struct BloomFilterLocation;
+
+namespace schema {
+class ColumnPath;
+}
+
+/// \brief Interface for collecting bloom filter of a parquet file.
+class PARQUET_EXPORT BloomFilterBuilder {

Review Comment:
   Where is the anonymous namespace?
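A generic, self-contained illustration of the unnamed-namespace idiom the question above alludes to; every name in it is hypothetical and not taken from the PR. Symbols declared in an unnamed namespace inside a `.cc` file get internal linkage, so helper code stays out of the public header and cannot collide with other translation units; only the symbols the header declares remain visible to users.

```cpp
// Hypothetical bloom_filter_builder.cc, for illustration only.
#include <cmath>
#include <cstdint>
#include <iostream>

namespace parquet {
namespace {

// Internal-only helper: not declared in any header, invisible to other
// translation units. The arithmetic is a rough Bloom-filter sizing estimate
// used purely as a placeholder.
int64_t HypotheticalOptimalNumBits(int64_t ndv, double fpp) {
  return static_cast<int64_t>(-1.44 * static_cast<double>(ndv) * std::log2(fpp)) + 1;
}

}  // namespace

// The only symbol this file would expose through the (hypothetical) header.
int64_t HypotheticalBloomFilterSizeBytes(int64_t ndv, double fpp) {
  return (HypotheticalOptimalNumBits(ndv, fpp) + 7) / 8;
}

}  // namespace parquet

int main() {
  std::cout << parquet::HypotheticalBloomFilterSizeBytes(1000, 0.01) << " bytes\n";
  return 0;
}
```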
