emkornfield commented on a change in pull request #6985:
URL: https://github.com/apache/arrow/pull/6985#discussion_r420528366
##########
File path: cpp/src/arrow/util/bit_util_test.cc
##########
@@ -315,6 +318,115 @@ TEST(FirstTimeBitmapWriter, NormalOperation) {
   }
 }
 
+std::string BitmapToString(const uint8_t* bitmap, int64_t bit_count) {
+  return arrow::internal::Bitmap(bitmap, /*offset*/ 0, /*length=*/bit_count).ToString();
+}
+
+std::string BitmapToString(const std::vector<uint8_t>& bitmap, int64_t bit_count) {
+  return BitmapToString(bitmap.data(), bit_count);
+}
+
+TEST(FirstTimeBitmapWriter, AppendWordOffsetOverwritesCorrectBitsOnExistingByte) {
+  auto check_append = [](const std::string& expected_bits, int64_t offset) {
+    std::vector<uint8_t> valid_bits = {0x00};

Review comment:
   I think I misunderstood the contract; updated the tests.

##########
File path: cpp/src/parquet/level_conversion.h
##########
@@ -0,0 +1,67 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+#include "parquet/platform.h"
+
+namespace parquet {
+namespace internal {
+
+void PARQUET_EXPORT DefinitionLevelsToBitmap(
+    const int16_t* def_levels, int64_t num_def_levels, const int16_t max_definition_level,
+    const int16_t max_repetition_level, int64_t* values_read, int64_t* null_count,
+    uint8_t* valid_bits, int64_t valid_bits_offset);
+
+// These APIs are likely to be revised as part of ARROW-8494 to reduce duplicate code.
+// They currently represent minimal functionality for vectorized computation of
+// definition levels.
+
+#if defined(ARROW_LITTLE_ENDIAN)
+/// Builds a bitmap by applying the predicate to the level vector provided.
+///
+/// \param[in] levels Rep or def level array.
+/// \param[in] num_levels The number of levels to process (must be [0, 64]).
+/// \param[in] predicate The predicate to apply (must have the signature `bool
+/// predicate(int16_t)`).
+/// \returns The bitmap using least-significant "bit" ordering.
+///
+/// N.B. Correct byte ordering is dependent on little-endian architectures.
+///
+template <typename Predicate>
+uint64_t LevelsToBitmap(const int16_t* levels, int64_t num_levels, Predicate predicate) {
+  // Both clang and GCC can vectorize this automatically with SSE4/AVX2.
+  uint64_t mask = 0;
+  for (int x = 0; x < num_levels; x++) {
+    mask |= static_cast<int64_t>(predicate(levels[x]) ? 1 : 0) << x;

Review comment:
   Fixed the first point. I think this header might get exposed publicly at some point, so we can't use DCHECK.
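
   For reference, a minimal sketch of the contract described in the docstring above, assuming the GreaterThanBitmap helper from this header; the driver main() and the sample level values are only illustrative, not part of the PR:

       #include <cstdint>
       #include <iostream>
       #include <vector>

       #include "parquet/level_conversion.h"  // declares parquet::internal::GreaterThanBitmap (this PR)

       int main() {
         // Definition levels for six values; max_definition_level is assumed to be 2 here.
         std::vector<int16_t> def_levels = {2, 1, 2, 0, 2, 2};
         // Bit x is set iff def_levels[x] > 1, i.e. the value at position x is defined.
         uint64_t defined = parquet::internal::GreaterThanBitmap(
             def_levels.data(), def_levels.size(), /*rhs=*/1);
         // Least-significant-bit ordering: bits 0, 2, 4 and 5 are set, so the mask is 0x35.
         std::cout << std::hex << defined << std::endl;  // prints 35
         return 0;
       }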

##########
File path: cpp/src/parquet/level_conversion.cc
##########
@@ -0,0 +1,170 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+#include "parquet/level_conversion.h"
+
+#include <algorithm>
+#include <limits>
+#if defined(ARROW_HAVE_BMI2)
+#include <x86intrin.h>
+#endif
+
+#include "arrow/util/bit_util.h"
+
+#include "parquet/exception.h"
+
+namespace parquet {
+namespace internal {
+namespace {
+inline void CheckLevelRange(const int16_t* levels, int64_t num_levels,
+                            const int16_t max_expected_level) {
+  int16_t min_level = std::numeric_limits<int16_t>::max();
+  int16_t max_level = std::numeric_limits<int16_t>::min();
+  for (int x = 0; x < num_levels; x++) {
+    min_level = std::min(levels[x], min_level);
+    max_level = std::max(levels[x], max_level);
+  }
+  if (ARROW_PREDICT_FALSE(num_levels > 0 &&
+                          (min_level < 0 || max_level > max_expected_level))) {
+    throw ParquetException("definition level exceeds maximum");
+  }
+}
+
+#if !defined(ARROW_HAVE_BMI2)
+
+inline void DefinitionLevelsToBitmapScalar(
+    const int16_t* def_levels, int64_t num_def_levels, const int16_t max_definition_level,
+    const int16_t max_repetition_level, int64_t* values_read, int64_t* null_count,
+    uint8_t* valid_bits, int64_t valid_bits_offset) {
+  // We assume here that valid_bits is large enough to accommodate the
+  // additional definition levels and the ones that have already been written.
+  ::arrow::internal::BitmapWriter valid_bits_writer(valid_bits, valid_bits_offset,
+                                                    num_def_levels);
+
+  // TODO(itaiin): As an interim solution we are splitting the code path here
+  // between repeated+flat column reads, and non-repeated+nested reads.
+  // Those paths need to be merged in the future.
+  for (int i = 0; i < num_def_levels; ++i) {
+    if (def_levels[i] == max_definition_level) {
+      valid_bits_writer.Set();
+    } else if (max_repetition_level > 0) {
+      // repetition+flat case
+      if (def_levels[i] == (max_definition_level - 1)) {
+        valid_bits_writer.Clear();
+        *null_count += 1;
+      } else {
+        continue;
+      }
+    } else {
+      // non-repeated+nested case
+      if (def_levels[i] < max_definition_level) {
+        valid_bits_writer.Clear();
+        *null_count += 1;
+      } else {
+        throw ParquetException("definition level exceeds maximum");
+      }
+    }
+
+    valid_bits_writer.Next();
+  }
+  valid_bits_writer.Finish();
+  *values_read = valid_bits_writer.position();
+}
+#endif
+
+template <bool has_repeated_parent>
+void DefinitionLevelsToBitmapSimd(const int16_t* def_levels, int64_t num_def_levels,
+                                  const int16_t required_definition_level,
+                                  int64_t* values_read, int64_t* null_count,
+                                  uint8_t* valid_bits, int64_t valid_bits_offset) {
+  constexpr int64_t kBitMaskSize = 64;
+  ::arrow::internal::FirstTimeBitmapWriter writer(valid_bits,
+                                                  /*start_offset=*/valid_bits_offset,
+                                                  /*length=*/num_def_levels);
+  int64_t set_count = 0;
+  *values_read = 0;
+  while (num_def_levels > 0) {
+    int64_t batch_size = std::min(num_def_levels, kBitMaskSize);
+    CheckLevelRange(def_levels, batch_size, required_definition_level);
+    uint64_t defined_bitmap = internal::GreaterThanBitmap(def_levels, batch_size,
+                                                          required_definition_level - 1);
+    if (has_repeated_parent) {
+#if defined(ARROW_HAVE_BMI2)
+      // This is currently a specialized code path assuming only (nested) lists are
+      // present through the leaf (i.e. no structs). Upper-level code only calls this
+      // method when the leaf values are nullable (otherwise no spacing is needed).
+      // Because only nested lists exist, it is sufficient to know that the field was
+      // either null or included (i.e. definition level > max_definition_level - 2).
+      // If there were structs mixed in, we would need to know the def level of the
+      // repeated parent so we could check for def_level > "def level of repeated parent".
+      uint64_t present_bitmap = internal::GreaterThanBitmap(
+          def_levels, batch_size, required_definition_level - 2);
+      uint64_t selected_bits = _pext_u64(defined_bitmap, present_bitmap);
+      writer.AppendWord(selected_bits, ::arrow::BitUtil::PopCount(present_bitmap));
+      set_count += ::arrow::BitUtil::PopCount(selected_bits);
+#else
+      assert(false && "must not execute this without BMI2");
+#endif
+    } else {
+      writer.AppendWord(defined_bitmap, batch_size);
+      set_count += ::arrow::BitUtil::PopCount(defined_bitmap);
+    }
+    def_levels += batch_size;
+    num_def_levels -= batch_size;
+  }
+
+  *values_read = writer.position();
+  *null_count += *values_read - set_count;
+  writer.Finish();
+}
+
+}  // namespace
+
+void DefinitionLevelsToBitmap(const int16_t* def_levels, int64_t num_def_levels,
+                              const int16_t max_definition_level,
+                              const int16_t max_repetition_level, int64_t* values_read,
+                              int64_t* null_count, uint8_t* valid_bits,
+                              int64_t valid_bits_offset) {
+  if (max_repetition_level > 0) {

Review comment:
   Split this into two functions.
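
   To make the _pext_u64 step above easier to follow, a minimal self-contained sketch of the bit selection it performs; PextScalar is only an illustrative stand-in for the BMI2 intrinsic, and the sample levels are hypothetical:

       #include <cstdint>
       #include <iostream>

       // Scalar stand-in for _pext_u64, for illustration only; the PR uses the BMI2
       // intrinsic when ARROW_HAVE_BMI2 is defined.
       uint64_t PextScalar(uint64_t value, uint64_t mask) {
         uint64_t result = 0;
         int out_bit = 0;
         for (int bit = 0; bit < 64; ++bit) {
           if (mask & (uint64_t(1) << bit)) {
             result |= ((value >> bit) & uint64_t(1)) << out_bit;
             ++out_bit;
           }
         }
         return result;
       }

       int main() {
         // Example with max_definition_level == 3 (nullable leaf inside a list):
         //   def level 3 -> list entry present and value defined
         //   def level 2 -> list entry present but value null
         //   def level 1 -> empty list, no leaf slot at all
         // For def_levels = {3, 2, 1, 3}:
         uint64_t defined_bitmap = 0x9;  // 0b1001, levels > 2
         uint64_t present_bitmap = 0xB;  // 0b1011, levels > 1
         // Keep validity bits only for the slots that actually exist, packed together.
         uint64_t selected = PextScalar(defined_bitmap, present_bitmap);
         std::cout << selected << std::endl;  // prints 5 (0b101): valid, null, valid
         return 0;
       }

   AppendWord then writes popcount(present_bitmap) bits (three here), so the null slot stays in the validity bitmap while the missing slot from the empty list is dropped.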

##########
File path: cpp/src/parquet/level_conversion.cc
##########
@@ -0,0 +1,170 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+#include "parquet/level_conversion.h"
+
+#include <algorithm>
+#include <limits>
+#if defined(ARROW_HAVE_BMI2)
+#include <x86intrin.h>
+#endif
+
+#include "arrow/util/bit_util.h"
+
+#include "parquet/exception.h"
+
+namespace parquet {
+namespace internal {
+namespace {
+inline void CheckLevelRange(const int16_t* levels, int64_t num_levels,
+                            const int16_t max_expected_level) {
+  int16_t min_level = std::numeric_limits<int16_t>::max();
+  int16_t max_level = std::numeric_limits<int16_t>::min();
+  for (int x = 0; x < num_levels; x++) {
+    min_level = std::min(levels[x], min_level);
+    max_level = std::max(levels[x], max_level);
+  }
+  if (ARROW_PREDICT_FALSE(num_levels > 0 &&
+                          (min_level < 0 || max_level > max_expected_level))) {
+    throw ParquetException("definition level exceeds maximum");
+  }
+}
+
+#if !defined(ARROW_HAVE_BMI2)
+
+inline void DefinitionLevelsToBitmapScalar(
+    const int16_t* def_levels, int64_t num_def_levels, const int16_t max_definition_level,
+    const int16_t max_repetition_level, int64_t* values_read, int64_t* null_count,
+    uint8_t* valid_bits, int64_t valid_bits_offset) {
+  // We assume here that valid_bits is large enough to accommodate the
+  // additional definition levels and the ones that have already been written.
+  ::arrow::internal::BitmapWriter valid_bits_writer(valid_bits, valid_bits_offset,
+                                                    num_def_levels);
+
+  // TODO(itaiin): As an interim solution we are splitting the code path here
+  // between repeated+flat column reads, and non-repeated+nested reads.
+  // Those paths need to be merged in the future.

Review comment:
   To expand on this: this really needs to accept a second level, repeated_parent_definition_level, which would replace max_definition_level - 1 below.
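
   A rough sketch of what that suggestion could look like; only the name repeated_parent_definition_level comes from the comment above, and the signature and comparison shown are an interpretation, not part of the PR:

       #include <cstdint>

       // Hypothetical revision of the scalar helper: the caller passes the definition
       // level of the innermost repeated parent instead of the code assuming
       // max_definition_level - 1.
       inline void DefinitionLevelsToBitmapScalar(
           const int16_t* def_levels, int64_t num_def_levels,
           int16_t max_definition_level,
           int16_t repeated_parent_definition_level,  // replaces max_definition_level - 1
           int64_t* values_read, int64_t* null_count, uint8_t* valid_bits,
           int64_t valid_bits_offset);

       // A def level >= repeated_parent_definition_level would then mean "a leaf slot
       // exists", and def level == max_definition_level would mean "the value is
       // defined"; anything in between is a null at the leaf.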

##########
File path: cpp/src/parquet/level_conversion.h
##########
@@ -0,0 +1,67 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#pragma once
+
+#include <cstdint>
+
+#include "parquet/platform.h"
+
+namespace parquet {
+namespace internal {
+
+void PARQUET_EXPORT DefinitionLevelsToBitmap(
+    const int16_t* def_levels, int64_t num_def_levels, const int16_t max_definition_level,
+    const int16_t max_repetition_level, int64_t* values_read, int64_t* null_count,
+    uint8_t* valid_bits, int64_t valid_bits_offset);
+
+// These APIs are likely to be revised as part of ARROW-8494 to reduce duplicate code.
+// They currently represent minimal functionality for vectorized computation of
+// definition levels.
+
+#if defined(ARROW_LITTLE_ENDIAN)
+/// Builds a bitmap by applying the predicate to the level vector provided.
+///
+/// \param[in] levels Rep or def level array.
+/// \param[in] num_levels The number of levels to process (must be [0, 64]).
+/// \param[in] predicate The predicate to apply (must have the signature `bool
+/// predicate(int16_t)`).
+/// \returns The bitmap using least-significant "bit" ordering.
+///
+/// N.B. Correct byte ordering is dependent on little-endian architectures.
+///
+template <typename Predicate>
+uint64_t LevelsToBitmap(const int16_t* levels, int64_t num_levels, Predicate predicate) {
+  // Both clang and GCC can vectorize this automatically with SSE4/AVX2.
+  uint64_t mask = 0;
+  for (int x = 0; x < num_levels; x++) {
+    mask |= static_cast<int64_t>(predicate(levels[x]) ? 1 : 0) << x;
+  }
+  return mask;
+}
+
+/// Builds a bitmap where each set bit indicates the corresponding level is greater
+/// than rhs.
+static inline uint64_t GreaterThanBitmap(const int16_t* levels, int64_t num_levels,
+                                         int16_t rhs) {
+  return LevelsToBitmap(levels, num_levels, [&](int16_t value) { return value > rhs; });

Review comment:
   Done.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org