R-JunmingChen commented on code in PR #37100:
URL: https://github.com/apache/arrow/pull/37100#discussion_r1364757116
##########
cpp/src/arrow/compute/kernels/aggregate_basic_internal.h:
##########
@@ -912,6 +912,122 @@ struct NullMinMaxImpl : public ScalarAggregator {
}
};
+template <SimdLevel::type SimdLevel>
+struct DictionaryMinMaxImpl : public ScalarAggregator {
+ using ThisType = DictionaryMinMaxImpl<SimdLevel>;
+
+ DictionaryMinMaxImpl(std::shared_ptr<DataType> out_type,
ScalarAggregateOptions options)
+ : options(std::move(options)),
+ out_type(std::move(out_type)),
+ has_nulls(false),
+ count(0),
+ min(nullptr),
+ max(nullptr) {
+ this->options.min_count = std::max<uint32_t>(1, this->options.min_count);
+ }
+
+ Status Consume(KernelContext* ctx, const ExecSpan& batch) override {
+ if (batch[0].is_scalar()) {
+ return Status::NotImplemented("No min/max implemented for
DictionaryScalar");
+ }
+
+ DictionaryArray dict_arr(batch[0].array.ToArrayData());
+ ARROW_ASSIGN_OR_RAISE(auto compacted_arr,
dict_arr.Compact(ctx->memory_pool()));
+ const DictionaryArray& compacted_dict_arr =
+ checked_cast<const DictionaryArray&>(*compacted_arr);
+ if (compacted_dict_arr.length() - compacted_dict_arr.null_count() == 0) {
+ return Status::OK();
+ }
+ this->has_nulls |= compacted_dict_arr.null_count() > 0;
+ this->count += compacted_dict_arr.length() -
compacted_dict_arr.null_count();
+
+ std::shared_ptr<Scalar> dict_min;
+ std::shared_ptr<Scalar> dict_max;
+ if (compacted_dict_arr.length() - compacted_dict_arr.null_count() == 1) {
+ ARROW_ASSIGN_OR_RAISE(dict_min,
compacted_dict_arr.dictionary()->GetScalar(0));
Review Comment:
Since `compacted_dict_arr.null_count()` only counts nulls among the indices, and the
dictionary itself is compacted, the method should be correct. I agree it's a premature
optimization; let's remove it.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]