pitrou commented on a change in pull request #8177:
URL: https://github.com/apache/arrow/pull/8177#discussion_r488815019
##########
File path: cpp/src/parquet/level_conversion.cc
##########
@@ -18,176 +18,205 @@
#include <algorithm>
#include <limits>
-#if defined(ARROW_HAVE_BMI2)
-#include <x86intrin.h>
-#endif
+#include "arrow/util/bit_run_reader.h"
#include "arrow/util/bit_util.h"
+#include "arrow/util/cpu_info.h"
#include "arrow/util/logging.h"
#include "parquet/exception.h"
+#include "parquet/level_comparison.h"
+#define PARQUET_IMPL_NAMESPACE standard
+#include "parquet/level_conversion_inc.h"
+#undef PARQUET_IMPL_NAMESPACE
+
namespace parquet {
namespace internal {
namespace {
-inline void CheckLevelRange(const int16_t* levels, int64_t num_levels,
- const int16_t max_expected_level) {
- int16_t min_level = std::numeric_limits<int16_t>::max();
- int16_t max_level = std::numeric_limits<int16_t>::min();
- for (int x = 0; x < num_levels; x++) {
- min_level = std::min(levels[x], min_level);
- max_level = std::max(levels[x], max_level);
- }
- if (ARROW_PREDICT_FALSE(num_levels > 0 &&
- (min_level < 0 || max_level > max_expected_level))) {
- throw ParquetException("definition level exceeds maximum");
- }
-}
-#if !defined(ARROW_HAVE_AVX512)
-
-inline void DefinitionLevelsToBitmapScalar(
- const int16_t* def_levels, int64_t num_def_levels, const int16_t
max_definition_level,
- const int16_t max_repetition_level, int64_t* values_read, int64_t*
null_count,
- uint8_t* valid_bits, int64_t valid_bits_offset) {
- // We assume here that valid_bits is large enough to accommodate the
- // additional definition levels and the ones that have already been written
- ::arrow::internal::BitmapWriter valid_bits_writer(valid_bits,
valid_bits_offset,
- num_def_levels);
-
- // TODO(itaiin): As an interim solution we are splitting the code path here
- // between repeated+flat column reads, and non-repeated+nested reads.
- // Those paths need to be merged in the future
- for (int i = 0; i < num_def_levels; ++i) {
- if (def_levels[i] == max_definition_level) {
+using ::arrow::internal::CpuInfo;
+
+#if !defined(ARROW_HAVE_RUNTIME_BMI2)
+void DefLevelsToBitmapScalar(const int16_t* def_levels, int64_t num_def_levels,
+ LevelInfo level_info, ValidityBitmapInputOutput*
output) {
+ ::arrow::internal::FirstTimeBitmapWriter valid_bits_writer(
+ output->valid_bits,
+ /*start_offset=*/output->valid_bits_offset,
+ /*length=*/num_def_levels);
+ for (int x = 0; x < num_def_levels; x++) {
+ // This indicates that a parent repeated element has zero
+ // length so the def level is not applicable to this column.
+ if (def_levels[x] < level_info.repeated_ancestor_def_level) {
+ continue;
+ }
+ if (ARROW_PREDICT_FALSE(valid_bits_writer.position() >=
+ output->values_read_upper_bound)) {
+ std::stringstream ss;
+ ss << "Definition levels exceeded upper bound: " <<
output->values_read_upper_bound;
+ throw ParquetException(ss.str());
+ }
+ if (def_levels[x] >= level_info.def_level) {
valid_bits_writer.Set();
- } else if (max_repetition_level > 0) {
- // repetition+flat case
- if (def_levels[i] == (max_definition_level - 1)) {
- valid_bits_writer.Clear();
- *null_count += 1;
- } else {
- continue;
- }
} else {
- // non-repeated+nested case
- if (def_levels[i] < max_definition_level) {
- valid_bits_writer.Clear();
- *null_count += 1;
- } else {
- throw ParquetException("definition level exceeds maximum");
- }
+ valid_bits_writer.Clear();
+ output->null_count += 1;
}
-
valid_bits_writer.Next();
}
valid_bits_writer.Finish();
- *values_read = valid_bits_writer.position();
+ output->values_read = valid_bits_writer.position();
+ if (output->null_count > 0 && level_info.null_slot_usage > 1) {
+ throw ParquetException(
+ "Null values with null_slot_usage > 1 not supported."
+ "(i.e. FixedSizeLists with null values are not supported");
+ }
}
#endif
-template <bool has_repeated_parent>
-int64_t DefinitionLevelsBatchToBitmap(const int16_t* def_levels, const int64_t
batch_size,
- const int16_t required_definition_level,
-
::arrow::internal::FirstTimeBitmapWriter* writer) {
- CheckLevelRange(def_levels, batch_size, required_definition_level);
- uint64_t defined_bitmap =
- internal::GreaterThanBitmap(def_levels, batch_size,
required_definition_level - 1);
-
- DCHECK_LE(batch_size, 64);
- if (has_repeated_parent) {
-#if defined(ARROW_HAVE_BMI2)
- // This is currently a specialized code path assuming only (nested) lists
- // present through the leaf (i.e. no structs). Upper level code only calls
- // this method when the leaf-values are nullable (otherwise no spacing is
needed),
- // Because only nested lists exists it is sufficient to know that the field
- // was either null or included it (i.e. definition level >
max_definitation_level
- // -2) If there where structs mixed in, we need to know the def_level of
the
- // repeated parent so we can check for def_level > "def level of repeated
parent".
- uint64_t present_bitmap = internal::GreaterThanBitmap(def_levels,
batch_size,
-
required_definition_level - 2);
- uint64_t selected_bits = _pext_u64(defined_bitmap, present_bitmap);
- writer->AppendWord(selected_bits,
::arrow::BitUtil::PopCount(present_bitmap));
- return ::arrow::BitUtil::PopCount(selected_bits);
-#else
- assert(false && "must not execute this without BMI2");
-#endif
- } else {
- writer->AppendWord(defined_bitmap, batch_size);
- return ::arrow::BitUtil::PopCount(defined_bitmap);
+template <typename OffsetType>
+void DefRepLevelsToListInfo(const int16_t* def_levels, const int16_t*
rep_levels,
+ int64_t num_def_levels, LevelInfo level_info,
+ ValidityBitmapInputOutput* output, OffsetType*
offsets) {
+ OffsetType* orig_pos = offsets;
+ std::unique_ptr<::arrow::internal::FirstTimeBitmapWriter> valid_bits_writer;
+ if (output->valid_bits) {
+ valid_bits_writer.reset(new ::arrow::internal::FirstTimeBitmapWriter(
+ output->valid_bits, output->valid_bits_offset, num_def_levels));
}
-}
+ for (int x = 0; x < num_def_levels; x++) {
+ // Skip items that belong to empty or null ancestor lists and further
nested lists.
+ if (def_levels[x] < level_info.repeated_ancestor_def_level ||
+ rep_levels[x] > level_info.rep_level) {
+ continue;
+ }
-template <bool has_repeated_parent>
-void DefinitionLevelsToBitmapSimd(const int16_t* def_levels, int64_t
num_def_levels,
- const int16_t required_definition_level,
- int64_t* values_read, int64_t* null_count,
- uint8_t* valid_bits, int64_t
valid_bits_offset) {
- constexpr int64_t kBitMaskSize = 64;
- ::arrow::internal::FirstTimeBitmapWriter writer(valid_bits,
-
/*start_offset=*/valid_bits_offset,
- /*length=*/num_def_levels);
- int64_t set_count = 0;
- *values_read = 0;
- while (num_def_levels > kBitMaskSize) {
- set_count += DefinitionLevelsBatchToBitmap<has_repeated_parent>(
- def_levels, kBitMaskSize, required_definition_level, &writer);
- def_levels += kBitMaskSize;
- num_def_levels -= kBitMaskSize;
- }
- set_count += DefinitionLevelsBatchToBitmap<has_repeated_parent>(
- def_levels, num_def_levels, required_definition_level, &writer);
+ if (rep_levels[x] == level_info.rep_level) {
+ // A continuation of an existing list.
+ // offsets can be null for structs with repeated children (we don't need
to know
+ // offsets until we get to the children).
+ if (offsets != nullptr) {
+ if (ARROW_PREDICT_FALSE(*offsets ==
std::numeric_limits<OffsetType>::max())) {
+ throw ParquetException("List index overflow.");
+ }
+ *offsets += 1;
+ }
+ } else {
+ if (ARROW_PREDICT_FALSE(
+ (valid_bits_writer != nullptr &&
+ valid_bits_writer->position() >=
output->values_read_upper_bound) ||
+ (offsets - orig_pos) >= output->values_read_upper_bound)) {
+ std::stringstream ss;
+ ss << "Definition levels exceeded upper bound: "
+ << output->values_read_upper_bound;
+ throw ParquetException(ss.str());
+ }
- *values_read = writer.position();
- *null_count += *values_read - set_count;
- writer.Finish();
+ // current_rep < list rep_level i.e. start of a list (ancestor empty
lists are
+ // filtered out above).
+ // offsets can be null for structs with repeated children (we don't need
to know
+ // offsets until we get to the children).
+ if (offsets != nullptr) {
+ ++offsets;
+ // Use cumulative offsets because variable size lists are more common
then
+ // fixed size lists so it should be cheaper to make these cumulative
and
+ // subtract when validating fixed size lists.
+ *offsets = *(offsets - 1);
+ if (def_levels[x] >= level_info.def_level) {
+ if (ARROW_PREDICT_FALSE(*offsets ==
std::numeric_limits<OffsetType>::max())) {
+ throw ParquetException("List index overflow.");
+ }
+ *offsets += 1;
+ }
+ }
+
+ if (valid_bits_writer != nullptr) {
+ // the level_info def level for lists reflects element present level.
+ // the prior level distinguishes between empty lists.
+ if (def_levels[x] >= level_info.def_level - 1) {
+ valid_bits_writer->Set();
+ } else {
+ output->null_count++;
+ valid_bits_writer->Clear();
+ }
+ valid_bits_writer->Next();
+ }
+ }
+ }
+ if (valid_bits_writer != nullptr) {
+ valid_bits_writer->Finish();
+ }
+ if (offsets != nullptr) {
+ output->values_read = offsets - orig_pos;
+ } else if (valid_bits_writer != nullptr) {
+ output->values_read = valid_bits_writer->position();
+ }
+ if (output->null_count > 0 && level_info.null_slot_usage > 1) {
+ throw ParquetException(
+ "Null values with null_slot_usage > 1 not supported."
+ "(i.e. FixedSizeLists with null values are not supported)");
+ }
}
-void DefinitionLevelsToBitmapLittleEndian(
- const int16_t* def_levels, int64_t num_def_levels, const int16_t
max_definition_level,
- const int16_t max_repetition_level, int64_t* values_read, int64_t*
null_count,
- uint8_t* valid_bits, int64_t valid_bits_offset) {
- if (max_repetition_level > 0) {
-// This is a short term hack to prevent using the pext BMI2 instructions
-// on non-intel platforms where performance is subpar.
-// In the medium term we will hopefully be able to runtime dispatch
-// to use this on intel only platforms that support pext.
-#if defined(ARROW_HAVE_AVX512)
- // BMI2 is required for efficient bit extraction.
- DefinitionLevelsToBitmapSimd</*has_repeated_parent=*/true>(
- def_levels, num_def_levels, max_definition_level, values_read,
null_count,
- valid_bits, valid_bits_offset);
-#else
- DefinitionLevelsToBitmapScalar(def_levels, num_def_levels,
max_definition_level,
- max_repetition_level, values_read,
null_count,
- valid_bits, valid_bits_offset);
-#endif // ARROW_HAVE_BMI2
+} // namespace
+#if defined(ARROW_HAVE_RUNTIME_BMI2)
+// defined in level_conversion_bmi2.cc for dynamic dispatch.
+void DefLevelsToBitmapBmi2WithRepeatedParent(const int16_t* def_levels,
+ int64_t num_def_levels, LevelInfo
level_info,
+ ValidityBitmapInputOutput*
output);
+#endif
+
+void DefLevelsToBitmap(const int16_t* def_levels, int64_t num_def_levels,
+ LevelInfo level_info, ValidityBitmapInputOutput*
output) {
+ // It is simpler to rely on rep_level here until PARQUET-1899 is done and
the code
+ // is deleted in a follow-up release.
+ if (level_info.rep_level > 0) {
+#if defined(ARROW_HAVE_RUNTIME_BMI2)
+ using FunctionType = decltype(&standard::DefLevelsToBitmapSimd<true>);
+ static FunctionType fn =
+ CpuInfo::GetInstance()->HasEfficientBmi2()
+ ? DefLevelsToBitmapBmi2WithRepeatedParent
+ : standard::DefLevelsToBitmapSimd</*has_repeated_parent=*/true>;
+ fn(def_levels, num_def_levels, level_info, output);
+#else
+ // This indicates we are likely on a big-endian platformat which don't
have a
Review comment:
Typo: "platformat" should be "platform".
But the comment is mistaken: ARM is little-endian most of the time
(technically it supports both, but Linux runs it in little-endian mode AFAIK).
Also, I don't understand why `DefLevelsToBitmapScalar` is preferred here while
`DefLevelsToBitmapSimd` is preferred below — don't the same arguments apply to both cases?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]