rtpsw commented on code in PR #14352:
URL: https://github.com/apache/arrow/pull/14352#discussion_r1019898609
##########
cpp/src/arrow/compute/row/grouper.cc:
##########
@@ -44,7 +45,406 @@ namespace compute {
namespace {
-struct GrouperImpl : Grouper {
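+// Returns the fixed-width values of the given array as raw bytes, advanced by
+// `offset` elements.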
+inline const uint8_t* GetValuesAsBytes(const ArrayData& data, int64_t offset = 0) {
+  int64_t absolute_byte_offset = (data.offset + offset) * data.type->byte_width();
+ return data.GetValues<uint8_t>(1, absolute_byte_offset);
+}
+
+inline const uint8_t* GetValuesAsBytes(const ArraySpan& data, int64_t offset = 0) {
+  int64_t absolute_byte_offset = (data.offset + offset) * data.type->byte_width();
+ return data.GetValues<uint8_t>(1, absolute_byte_offset);
+}
+
+template <typename Value>
+Status CheckForGetNextSegment(const std::vector<Value>& values, int64_t length,
+                              int64_t offset, const std::vector<TypeHolder>& key_types) {
+ if (offset < 0 || offset > length) {
+ return Status::Invalid("invalid grouping segmenter offset: ", offset);
+ }
+ if (values.size() != key_types.size()) {
+    return Status::Invalid("expected batch size ", key_types.size(), " but got ",
+ values.size());
+ }
+ for (size_t i = 0; i < key_types.size(); i++) {
+ const auto& value = values[i];
+ const auto& key_type = key_types[i];
+ if (*value.type() != *key_type.type) {
+      return Status::Invalid("expected batch value ", i, " of type ", *key_type.type,
+ " but got ", *value.type());
+ }
+ }
+ return Status::OK();
+}
+
+Status CheckForGetNextSegment(const ExecSpan& batch, int64_t offset,
+ const std::vector<TypeHolder>& key_types) {
+ return CheckForGetNextSegment(batch.values, batch.length, offset, key_types);
+}
+
+Status CheckForGetNextSegment(const ExecBatch& batch, int64_t offset,
+ const std::vector<TypeHolder>& key_types) {
+ return CheckForGetNextSegment(batch.values, batch.length, offset, key_types);
+}
+
+struct StatelessGroupingSegmenter : public GroupingSegmenter {
+ Status Reset() override { return Status::OK(); }
+};
+
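+// Makes a segment spanning [offset, offset + length); the last field records
+// whether the segment reaches the end of the batch.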
+GroupingSegment MakeSegment(int64_t batch_length, int64_t offset, int64_t length) {
+ return GroupingSegment{offset, length, offset + length >= batch_length};
+}
+
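+// Returns the number of consecutive rows, starting at `offset`, whose
+// fixed-width value equals the one pointed to by `match_bytes`.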
+int64_t GetMatchLength(const uint8_t* match_bytes, int64_t match_width,
+                       const uint8_t* array_bytes, int64_t offset, int64_t length) {
+ int64_t cursor, byte_cursor;
+ for (cursor = offset, byte_cursor = match_width * cursor; cursor < length;
+ cursor++, byte_cursor += match_width) {
+ if (memcmp(match_bytes, array_bytes + byte_cursor,
+ static_cast<size_t>(match_width)) != 0) {
+ break;
+ }
+ }
+  return std::min(cursor, length) - offset;  // number of consecutive matching rows
+}
+
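+// Computes the next segment of a chunked array: locates the chunk containing
+// `offset`, then extends the value match across subsequent chunks.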
+Result<GroupingSegment> GetNextSegmentChunked(
+ const std::shared_ptr<ChunkedArray>& chunked_array, int64_t offset) {
+ if (offset >= chunked_array->length()) {
+ return MakeSegment(chunked_array->length(), chunked_array->length(), 0);
+ }
+ int64_t remaining_offset = offset;
+ const auto& arrays = chunked_array->chunks();
+ for (size_t i = 0; remaining_offset >= 0 && i < arrays.size(); i++) {
+ // look up chunk containing offset
+ int64_t array_length = arrays[i]->length();
+ if (remaining_offset < array_length) {
+ // found - switch to matching
+ int64_t match_width = arrays[i]->type()->byte_width();
+      const uint8_t* match_bytes = GetValuesAsBytes(*arrays[i]->data(), remaining_offset);
+ int64_t total_match_length = 0;
+ for (; i < arrays.size(); i++) {
+ int64_t array_length = arrays[i]->length();
+ if (array_length <= 0) continue;
+ const uint8_t* array_bytes = GetValuesAsBytes(*arrays[i]->data());
+        int64_t match_length = GetMatchLength(match_bytes, match_width, array_bytes,
+ remaining_offset, array_length);
+ total_match_length += match_length;
+        // check against the in-chunk offset before resetting it for later chunks
+        if (match_length < array_length - remaining_offset) break;
+        remaining_offset = 0;
+ }
+ return MakeSegment(chunked_array->length(), offset, total_match_length);
+ }
+ remaining_offset -= array_length;
+ }
+ return Status::Invalid("segmenting invalid chunked array value");
+}
+
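+// A segmenter for the case of no keys: the remainder of the batch is a single
+// segment.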
+struct NoKeysGroupingSegmenter : public StatelessGroupingSegmenter {
+ static Result<std::unique_ptr<GroupingSegmenter>> Make() {
+ return std::make_unique<NoKeysGroupingSegmenter>();
+ }
+
+ template <typename Batch>
+  Result<GroupingSegment> GetNextSegmentImpl(const Batch& batch, int64_t offset) {
+ ARROW_RETURN_NOT_OK(CheckForGetNextSegment(batch, offset, {}));
+ return MakeSegment(batch.length, offset, batch.length - offset);
+ }
+
+  Result<GroupingSegment> GetNextSegment(const ExecSpan& batch, int64_t offset) override {
+ return GetNextSegmentImpl(batch, offset);
+ }
+
+ Result<GroupingSegment> GetNextSegment(const ExecBatch& batch,
+ int64_t offset) override {
+ return GetNextSegmentImpl(batch, offset);
+ }
+};
+
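+// A segmenter for a single fixed-width key, matched by direct byte comparison.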
+struct SimpleKeyGroupingSegmenter : public StatelessGroupingSegmenter {
+ static Result<std::unique_ptr<GroupingSegmenter>> Make(
+ std::vector<TypeHolder> key_types) {
+    return std::make_unique<SimpleKeyGroupingSegmenter>(std::move(key_types));
+ }
+
+ explicit SimpleKeyGroupingSegmenter(std::vector<TypeHolder> key_types)
+ : key_types_(std::move(key_types)) {}
+
+ Status CheckType(const DataType& type) {
+ if (!is_fixed_width(type)) {
+      return Status::Invalid("SimpleKeyGroupingSegmenter does not support type ", type);
+ }
+ return Status::OK();
+ }
+
+ Result<GroupingSegment> GetNextSegment(const Scalar& scalar, int64_t offset,
+ int64_t length) {
+ ARROW_RETURN_NOT_OK(CheckType(*scalar.type));
+ if (!scalar.is_valid) {
+ return Status::Invalid("segmenting an invalid scalar");
+ }
+    return MakeSegment(length, offset, length - offset);  // a scalar covers all remaining rows
+ }
+
+ Result<GroupingSegment> GetNextSegment(const DataType& array_type,
+                                         const uint8_t* array_bytes, int64_t offset,
+ int64_t length) {
+    ARROW_RETURN_NOT_OK(CheckType(array_type));
+ int64_t byte_width = array_type.byte_width();
+    int64_t match_length = GetMatchLength(array_bytes + offset * byte_width, byte_width,
+ array_bytes, offset, length);
+ return MakeSegment(length, offset, match_length);
+ }
+
+  Result<GroupingSegment> GetNextSegment(const ExecSpan& batch, int64_t offset) override {
+ ARROW_RETURN_NOT_OK(CheckForGetNextSegment(batch, offset, key_types_));
+ const auto& value = batch.values[0];
+ if (value.is_scalar()) {
+ return GetNextSegment(*value.scalar, offset, batch.length);
+ }
+ ARROW_DCHECK(value.is_array());
+ const auto& array = value.array;
+ if (array.GetNullCount() > 0) {
+ return Status::NotImplemented("segmenting a nullable array");
+ }
+    return GetNextSegment(*array.type, GetValuesAsBytes(array), offset, batch.length);
+ }
+
+ Result<GroupingSegment> GetNextSegment(const ExecBatch& batch,
+ int64_t offset) override {
+ ARROW_RETURN_NOT_OK(CheckForGetNextSegment(batch, offset, key_types_));
+ const auto& value = batch.values[0];
+ if (value.is_scalar()) {
+ return GetNextSegment(*value.scalar(), offset, batch.length);
+ }
+ if (value.is_array()) {
+ auto array = value.array();
+ if (array->GetNullCount() > 0) {
+ return Status::NotImplemented("segmenting a nullable array");
+ }
+      return GetNextSegment(*array->type, GetValuesAsBytes(*array), offset, batch.length);
+ }
+ if (value.is_chunked_array()) {
+ auto array = value.chunked_array();
+ if (array->null_count() > 0) {
+ return Status::NotImplemented("segmenting a nullable array");
+ }
+ return GetNextSegmentChunked(array, offset);
+ }
+ return Status::Invalid("segmenting unsupported value kind ", value.kind());
+ }
+
+ private:
+ const std::vector<TypeHolder> key_types_;
+};
+
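+// A segmenter for one or more keys of any supported type: maps rows to group
+// ids using a Grouper, then segments on the first change of group id.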
+struct AnyKeysGroupingSegmenter : public StatelessGroupingSegmenter {
+ static Result<std::unique_ptr<GroupingSegmenter>> Make(
+ std::vector<TypeHolder> key_types, ExecContext* ctx) {
+ ARROW_RETURN_NOT_OK(Grouper::Make(key_types, ctx)); // check types
+ return std::make_unique<AnyKeysGroupingSegmenter>(key_types, ctx);
+ }
+
+ AnyKeysGroupingSegmenter(std::vector<TypeHolder> key_types, ExecContext* ctx)
+ : key_types_(std::move(key_types)), ctx_(ctx) {}
+
+ template <typename Batch>
+  Result<GroupingSegment> GetNextSegmentImpl(const Batch& batch, int64_t offset) {
+ ARROW_RETURN_NOT_OK(CheckForGetNextSegment(batch, offset, key_types_));
+    // TODO: make Grouper support Reset(), so it can be cached instead of recreated here
+ ARROW_ASSIGN_OR_RAISE(auto grouper, Grouper::Make(key_types_, ctx_));
+ ARROW_ASSIGN_OR_RAISE(auto datum, grouper->Consume(batch, offset));
+ if (datum.is_array()) {
+ const std::shared_ptr<ArrayData>& data = datum.array();
+ ARROW_DCHECK(data->GetNullCount() == 0);
+      ARROW_DCHECK(*data->type == *uint32());  // as long as Grouper::Consume gives uint32
+ const uint32_t* values = data->GetValues<uint32_t>(1);
+ int64_t cursor;
+ for (cursor = 1; cursor < data->length; cursor++) {
+ if (values[0] != values[cursor]) break;
+ }
+      return MakeSegment(batch.length, offset, std::min(cursor, batch.length - offset));
+ } else if (datum.is_chunked_array()) {
+ return GetNextSegmentChunked(datum.chunked_array(), offset);
+ } else {
+      return Status::Invalid("segmenting unsupported datum kind ", datum.kind());
+ }
+ }
+
+  Result<GroupingSegment> GetNextSegment(const ExecSpan& batch, int64_t offset) override {
+ return GetNextSegmentImpl(batch, offset);
+ }
+
+ Result<GroupingSegment> GetNextSegment(const ExecBatch& batch,
+ int64_t offset) override {
+ return GetNextSegmentImpl(batch, offset);
+ }
+
+ private:
+ const std::vector<TypeHolder> key_types_;
+ ExecContext* const ctx_;
+};
+
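+// Validates the consume offset and defaults a negative length to the remainder
+// of the batch.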
+Status CheckForConsume(const ExecSpan& batch, int64_t& consume_offset,
+ int64_t& consume_length) {
+ if (consume_offset < 0) {
+ return Status::Invalid("invalid grouper consume offset: ", consume_offset);
+ }
+ if (consume_length < 0) {
+ consume_length = batch.length - consume_offset;
+ }
+ return Status::OK();
+}
+
+} // namespace
+
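+// Picks the cheapest segmenter that supports the given key types.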
+Result<std::unique_ptr<GroupingSegmenter>> GroupingSegmenter::Make(
+ std::vector<TypeHolder> key_types, ExecContext* ctx) {
+ if (key_types.size() == 0) {
+ return NoKeysGroupingSegmenter::Make();
+ } else if (key_types.size() == 1) {
+ const DataType* type = key_types[0].type;
+ if (type != NULLPTR && is_fixed_width(*type)) {
+ return SimpleKeyGroupingSegmenter::Make(std::move(key_types));
+ }
+ }
+ return AnyKeysGroupingSegmenter::Make(std::move(key_types), ctx);
+}
+
+namespace {
+
+struct BaseGrouper : public Grouper {
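+  // Returns the index of the first chunked-array value in the batch, or -1 if
+  // there is none.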
+ int IndexOfChunk(const ExecBatch& batch) {
+ int i = 0;
+ for (const auto& value : batch.values) {
+ if (value.is_chunked_array()) {
+ return i;
+ }
+ ++i;
+ }
+ return -1;
+ }
+
+ bool HasConsistentChunks(const ExecBatch& batch, int index_of_chunk) {
Review Comment:
The reason for requiring consistent chunks, defined as every (chunked-array) value in the batch having the same sequence of chunk lengths, is that it guarantees we can create an `ExecSpan` without fragmenting the chunks. For example, if one value had chunk lengths [3, 5] and a second value had chunk lengths [2, 6], we'd need to fragment into three `ExecSpan`s of lengths [2, 1, 5] (see the sketch below), since each span must cover rows from a single chunk of every value. Of course, the fragmentation could easily get much worse with many values having inconsistent chunks, and the performance cost of fragmentation would be annoying. So, while the requirement could be lifted using something like `MultipleChunkIterator`, I'm not sure it is worth it. Let me know your thoughts.
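
To make the arithmetic concrete, here is a minimal standalone sketch (plain C++, not Arrow code; `FragmentLengths` is a hypothetical helper) that derives the span lengths from the union of the values' chunk boundaries:

```cpp
#include <cstdint>
#include <iostream>
#include <set>
#include <vector>

// Every chunk boundary of every value must start a new span: collect the end
// offsets of all chunks, then take the gaps between consecutive boundaries.
std::vector<int64_t> FragmentLengths(
    const std::vector<std::vector<int64_t>>& chunk_lengths_per_value) {
  std::set<int64_t> boundaries;
  for (const auto& chunk_lengths : chunk_lengths_per_value) {
    int64_t end = 0;
    for (int64_t length : chunk_lengths) {
      end += length;
      boundaries.insert(end);
    }
  }
  std::vector<int64_t> spans;
  int64_t previous = 0;
  for (int64_t boundary : boundaries) {
    spans.push_back(boundary - previous);
    previous = boundary;
  }
  return spans;
}

int main() {
  // the example above: chunk lengths [3, 5] vs [2, 6] over the same 8 rows
  for (int64_t span : FragmentLengths({{3, 5}, {2, 6}})) {
    std::cout << span << " ";  // prints: 2 1 5
  }
  std::cout << std::endl;
  return 0;
}
```

In the worst case the number of spans approaches the total number of distinct chunk boundaries across all values, which is exactly the cost the consistency requirement avoids.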