rtpsw commented on code in PR #34311:
URL: https://github.com/apache/arrow/pull/34311#discussion_r1117885846
##########
cpp/src/arrow/compute/row/grouper.cc:
##########
@@ -39,12 +43,521 @@
namespace arrow {
using internal::checked_cast;
+using internal::PrimitiveScalarBase;
namespace compute {
namespace {
-struct GrouperImpl : Grouper {
+constexpr uint32_t kNoGroupId = std::numeric_limits<uint32_t>::max();
+
+using group_id_t = std::remove_const<decltype(kNoGroupId)>::type;
+using GroupIdType = CTypeTraits<group_id_t>::ArrowType;
+auto group_id_type = std::make_shared<GroupIdType>();
+
+inline const uint8_t* GetValuesAsBytes(const ArrayData& data, int64_t offset =
0) {
+ DCHECK_GT(data.type->byte_width(), 0);
+ int64_t absolute_byte_offset = (data.offset + offset) *
data.type->byte_width();
+ return data.GetValues<uint8_t>(1, absolute_byte_offset);
+}
+
+inline const uint8_t* GetValuesAsBytes(const ArraySpan& data, int64_t offset =
0) {
+ DCHECK_GT(data.type->byte_width(), 0);
+ int64_t absolute_byte_offset = (data.offset + offset) *
data.type->byte_width();
+ return data.GetValues<uint8_t>(1, absolute_byte_offset);
+}
+
+template <typename Value>
+Status CheckForGetNextSegment(const std::vector<Value>& values, int64_t length,
+ int64_t offset, const std::vector<TypeHolder>&
key_types) {
+ if (offset < 0 || offset > length) {
+ return Status::Invalid("invalid grouping segmenter offset: ", offset);
+ }
+ if (values.size() != key_types.size()) {
+ return Status::Invalid("expected batch size ", key_types.size(), " but got
",
+ values.size());
+ }
+ for (size_t i = 0; i < key_types.size(); i++) {
+ const auto& value = values[i];
+ const auto& key_type = key_types[i];
+ if (*value.type() != *key_type.type) {
+ return Status::Invalid("expected batch value ", i, " of type ",
*key_type.type,
+ " but got ", *value.type());
+ }
+ }
+ return Status::OK();
+}
+
+template <typename Batch>
+enable_if_t<std::is_same<Batch, ExecSpan>::value || std::is_same<Batch,
ExecBatch>::value,
+ Status>
+CheckForGetNextSegment(const Batch& batch, int64_t offset,
+ const std::vector<TypeHolder>& key_types) {
+ return CheckForGetNextSegment(batch.values, batch.length, offset, key_types);
+}
+
+struct BaseGroupingSegmenter : public GroupingSegmenter {
+ explicit BaseGroupingSegmenter(const std::vector<TypeHolder>& key_types)
+ : key_types_(key_types) {}
+
+ const std::vector<TypeHolder>& key_types() const override { return
key_types_; }
+
+ std::vector<TypeHolder> key_types_;
+};
+
+GroupingSegment MakeSegment(int64_t batch_length, int64_t offset, int64_t
length,
+ bool extends) {
+ return GroupingSegment{offset, length, offset + length >= batch_length,
extends};
+}
+
+int64_t GetMatchLength(const uint8_t* match_bytes, int64_t match_width,
 Review Comment:
We could create an issue to add support for declaring ordered aggregation in
`AggregationNodeOptions`; in that case the ordering of the input segment keys
would be enforced, and binary-search optimizations could be applied.
##########
cpp/src/arrow/compute/row/grouper.cc:
##########
@@ -39,12 +43,521 @@
namespace arrow {
using internal::checked_cast;
+using internal::PrimitiveScalarBase;
namespace compute {
namespace {
-struct GrouperImpl : Grouper {
+constexpr uint32_t kNoGroupId = std::numeric_limits<uint32_t>::max();
+
+using group_id_t = std::remove_const<decltype(kNoGroupId)>::type;
+using GroupIdType = CTypeTraits<group_id_t>::ArrowType;
+auto group_id_type = std::make_shared<GroupIdType>();
+
+inline const uint8_t* GetValuesAsBytes(const ArrayData& data, int64_t offset =
0) {
+ DCHECK_GT(data.type->byte_width(), 0);
+ int64_t absolute_byte_offset = (data.offset + offset) *
data.type->byte_width();
+ return data.GetValues<uint8_t>(1, absolute_byte_offset);
+}
+
+inline const uint8_t* GetValuesAsBytes(const ArraySpan& data, int64_t offset =
0) {
+ DCHECK_GT(data.type->byte_width(), 0);
+ int64_t absolute_byte_offset = (data.offset + offset) *
data.type->byte_width();
+ return data.GetValues<uint8_t>(1, absolute_byte_offset);
+}
+
+template <typename Value>
+Status CheckForGetNextSegment(const std::vector<Value>& values, int64_t length,
+ int64_t offset, const std::vector<TypeHolder>&
key_types) {
+ if (offset < 0 || offset > length) {
+ return Status::Invalid("invalid grouping segmenter offset: ", offset);
+ }
+ if (values.size() != key_types.size()) {
+ return Status::Invalid("expected batch size ", key_types.size(), " but got
",
+ values.size());
+ }
+ for (size_t i = 0; i < key_types.size(); i++) {
+ const auto& value = values[i];
+ const auto& key_type = key_types[i];
+ if (*value.type() != *key_type.type) {
+ return Status::Invalid("expected batch value ", i, " of type ",
*key_type.type,
+ " but got ", *value.type());
+ }
+ }
+ return Status::OK();
+}
+
+template <typename Batch>
+enable_if_t<std::is_same<Batch, ExecSpan>::value || std::is_same<Batch,
ExecBatch>::value,
+ Status>
+CheckForGetNextSegment(const Batch& batch, int64_t offset,
+ const std::vector<TypeHolder>& key_types) {
+ return CheckForGetNextSegment(batch.values, batch.length, offset, key_types);
+}
+
+struct BaseGroupingSegmenter : public GroupingSegmenter {
+ explicit BaseGroupingSegmenter(const std::vector<TypeHolder>& key_types)
+ : key_types_(key_types) {}
+
+ const std::vector<TypeHolder>& key_types() const override { return
key_types_; }
+
+ std::vector<TypeHolder> key_types_;
+};
+
+GroupingSegment MakeSegment(int64_t batch_length, int64_t offset, int64_t
length,
+ bool extends) {
+ return GroupingSegment{offset, length, offset + length >= batch_length,
extends};
+}
+
+int64_t GetMatchLength(const uint8_t* match_bytes, int64_t match_width,
+ const uint8_t* array_bytes, int64_t offset, int64_t
length) {
+ int64_t cursor, byte_cursor;
+ for (cursor = offset, byte_cursor = match_width * cursor; cursor < length;
+ cursor++, byte_cursor += match_width) {
+ if (memcmp(match_bytes, array_bytes + byte_cursor,
+ static_cast<size_t>(match_width)) != 0) {
+ break;
+ }
+ }
+ return std::min(cursor, length - offset);
+}
+
+using ExtendFunc = std::function<bool(const void*)>;
+constexpr bool kDefaultExtends = true;
+constexpr bool kEmptyExtends = true;
+
+Result<GroupingSegment> GetNextSegmentChunked(
+ const std::shared_ptr<ChunkedArray>& chunked_array, int64_t offset,
+ ExtendFunc extend) {
+ if (offset >= chunked_array->length()) {
+ return MakeSegment(chunked_array->length(), chunked_array->length(), 0,
+ kEmptyExtends);
+ }
+ int64_t remaining_offset = offset;
+ const auto& arrays = chunked_array->chunks();
+ for (size_t i = 0; remaining_offset >= 0 && i < arrays.size(); i++) {
+ // look up chunk containing offset
+ int64_t array_length = arrays[i]->length();
+ if (remaining_offset < array_length) {
+ // found - switch to matching
+ int64_t match_width = arrays[i]->type()->byte_width();
+ const uint8_t* match_bytes = GetValuesAsBytes(*arrays[i]->data(),
remaining_offset);
+ int64_t total_match_length = 0;
+ for (; i < arrays.size(); i++) {
+ int64_t array_length = arrays[i]->length();
+ if (array_length <= 0) continue;
+ const uint8_t* array_bytes = GetValuesAsBytes(*arrays[i]->data());
+ int64_t match_length = GetMatchLength(match_bytes, match_width,
array_bytes,
+ remaining_offset, array_length);
+ total_match_length += match_length;
+ remaining_offset = 0;
+ if (match_length < array_length - remaining_offset) break;
+ }
+ bool extends = extend(match_bytes);
+ return MakeSegment(chunked_array->length(), offset, total_match_length,
extends);
+ }
+ remaining_offset -= array_length;
+ }
+ return Status::Invalid("segmenting invalid chunked array value");
+}
Review Comment:
Done.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]