zanmato1984 commented on code in PR #45001:
URL: https://github.com/apache/arrow/pull/45001#discussion_r1883314088
##########
cpp/src/arrow/compute/kernels/CMakeLists.txt:
##########
@@ -87,6 +88,7 @@ add_arrow_benchmark(scalar_round_benchmark PREFIX
"arrow-compute")
add_arrow_benchmark(scalar_set_lookup_benchmark PREFIX "arrow-compute")
add_arrow_benchmark(scalar_string_benchmark PREFIX "arrow-compute")
add_arrow_benchmark(scalar_temporal_benchmark PREFIX "arrow-compute")
+add_arrow_benchmark(scalar_hash_benchmark PREFIX "arrow-compute")
Review Comment:
Keep alphabetical order for this one too.
##########
cpp/src/arrow/compute/kernels/CMakeLists.txt:
##########
@@ -73,6 +73,7 @@ add_arrow_compute_test(scalar_utility_test
scalar_random_test.cc
scalar_set_lookup_test.cc
scalar_validity_test.cc
+ scalar_hash_test.cc
Review Comment:
```suggestion
scalar_hash_test.cc
scalar_random_test.cc
scalar_set_lookup_test.cc
scalar_validity_test.cc
```
##########
cpp/src/arrow/compute/kernels/CMakeLists.txt:
##########
@@ -73,6 +73,7 @@ add_arrow_compute_test(scalar_utility_test
scalar_random_test.cc
scalar_set_lookup_test.cc
scalar_validity_test.cc
+ scalar_hash_test.cc
Review Comment:
Might want to keep the source list in alphabetical order.
##########
cpp/src/arrow/compute/api_scalar.cc:
##########
@@ -934,6 +934,12 @@ Result<Datum> MapLookup(const Datum& arg, MapLookupOptions
options, ExecContext*
return CallFunction("map_lookup", {arg}, &options, ctx);
}
+// ----------------------------------------------------------------------
+// Hash functions
+Result<Datum> Hash64(const Datum& input_array, ExecContext* ctx) {
+ return CallFunction("hash_64", {input_array}, ctx);
Review Comment:
Should we consider naming it `hash64` instead of `hash_64`?
##########
cpp/src/arrow/compute/api_scalar.h:
##########
@@ -1776,5 +1776,21 @@ ARROW_EXPORT Result<Datum> NanosecondsBetween(const
Datum& left, const Datum& ri
/// \note API not yet finalized
ARROW_EXPORT Result<Datum> MapLookup(const Datum& map, MapLookupOptions
options,
ExecContext* ctx = NULLPTR);
+
+/// \brief Construct a hash value for each row of the input.
+///
+/// The result is an Array of length equal to the length of the input;
however, the output
+/// shall be a UInt64Array, with each element being a hash constructed from
each row of
+/// the input. If the input Array is a NestedArray, this means that each
"attribute" or
+/// "field" of the input NestedArray corresponding to the same "row" will
collectively
+/// produce a single uint64_t hash. At the moment, this function does not take
options,
+/// though these may be added in the future.
+///
+/// \param[in] input_array input data to hash
+/// \param[in] ctx function execution context, optional
Review Comment:
```suggestion
/// \param[in] ctx function execution context, optional
```
##########
cpp/src/arrow/compute/kernels/scalar_hash.cc:
##########
@@ -0,0 +1,204 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include <algorithm>
+
+#include "arrow/array/array_base.h"
+#include "arrow/array/builder_primitive.h"
+#include "arrow/compute/kernels/common_internal.h"
+#include "arrow/compute/key_hash_internal.h"
+#include "arrow/compute/light_array_internal.h"
+#include "arrow/compute/util.h"
+#include "arrow/result.h"
+
+namespace arrow {
+namespace compute {
+namespace internal {
+
+// Define symbols visible within `arrow::compute::internal` in this file;
+// these symbols are not visible outside of this file.
+namespace {
+
+// Function documentation
+const FunctionDoc hash_64_doc{
+ "Construct a hash for every element of the input argument",
+ ("An element-wise function that uses an xxHash-like algorithm.\n"
+ "This function is not suitable for cryptographic purposes.\n"
+ "Hash results are 64-bit and emitted for each valid row.\n"
+ "Null (or invalid) rows emit a null in the output."),
+ {"hash_input"}};
+
+// ------------------------------
+// Kernel implementations
+// It is expected that HashArrowType is either UInt32Type or UInt64Type
(default)
+// template <typename HashArrowType = UInt64Type>
+struct FastHashScalar {
+ static Result<KeyColumnArray> ToColumnArray(
+ const ArraySpan& array, LightContext* ctx,
+ const uint8_t* list_values_buffer = nullptr) {
+ KeyColumnMetadata metadata;
+ const uint8_t* validity_buffer = nullptr;
+ const uint8_t* fixed_length_buffer = nullptr;
+ const uint8_t* var_length_buffer = nullptr;
+
+ if (array.GetBuffer(0) != nullptr) {
+ validity_buffer = array.GetBuffer(0)->data();
+ }
+ if (array.GetBuffer(1) != nullptr) {
+ fixed_length_buffer = array.GetBuffer(1)->data();
+ }
+
+ auto type_id = array.type->id();
+ if (type_id == Type::NA) {
+ metadata = KeyColumnMetadata(true, 0, true);
+ } else if (type_id == Type::BOOL) {
+ metadata = KeyColumnMetadata(true, 0);
+ } else if (is_fixed_width(type_id)) {
+ metadata = KeyColumnMetadata(true, array.type->bit_width() / 8);
+ } else if (is_binary_like(type_id)) {
+ metadata = KeyColumnMetadata(false, sizeof(uint32_t));
+ var_length_buffer = array.GetBuffer(2)->data();
+ } else if (is_large_binary_like(type_id)) {
+ metadata = KeyColumnMetadata(false, sizeof(uint64_t));
+ var_length_buffer = array.GetBuffer(2)->data();
+ } else if (type_id == Type::MAP) {
+ metadata = KeyColumnMetadata(false, sizeof(uint32_t));
+ var_length_buffer = list_values_buffer;
+ } else if (type_id == Type::LIST) {
+ metadata = KeyColumnMetadata(false, sizeof(uint32_t));
+ var_length_buffer = list_values_buffer;
+ } else if (type_id == Type::LARGE_LIST) {
+ metadata = KeyColumnMetadata(false, sizeof(uint64_t));
+ var_length_buffer = list_values_buffer;
+ } else if (type_id == Type::FIXED_SIZE_LIST) {
+ auto list_type = checked_cast<const FixedSizeListType*>(array.type);
+ metadata = KeyColumnMetadata(true, list_type->list_size());
+ fixed_length_buffer = list_values_buffer;
+ } else {
+ return Status::TypeError("Unsupported column data type ",
array.type->name(),
+ " used with hash_64 compute kernel");
+ }
+
+ return KeyColumnArray(metadata, array.length, validity_buffer,
fixed_length_buffer,
+ var_length_buffer);
+ }
+
+ static Result<std::shared_ptr<ArrayData>> HashChild(const ArraySpan& array,
+ const ArraySpan& child,
+ LightContext* hash_ctx,
+ MemoryPool* memory_pool)
{
+ auto buffer_size = child.length * sizeof(uint64_t);
+ ARROW_ASSIGN_OR_RAISE(auto buffer, AllocateBuffer(buffer_size,
memory_pool));
+ ARROW_RETURN_NOT_OK(
+ HashArray(child, hash_ctx, memory_pool,
buffer->mutable_data_as<uint64_t>()));
+ return ArrayData::Make(uint64(), child.length,
+ {array.GetBuffer(0), std::move(buffer)},
array.null_count);
+ }
+
+ static Status HashArray(const ArraySpan& array, LightContext* hash_ctx,
+ MemoryPool* memory_pool, uint64_t* out) {
+ // KeyColumnArray objects are being passed to the hashing utility
+ std::vector<KeyColumnArray> columns(1);
+ // ensure that we keep the converted child arrays alive because
KeyColumnArray
+ // only provides a view into the original array data
+ std::vector<std::shared_ptr<ArrayData>> children;
+
+ auto type_id = array.type->id();
+ if (type_id == Type::STRUCT) {
+ columns.reserve(array.child_data.size());
+ for (size_t i = 0; i < array.child_data.size(); i++) {
+ auto child = array.child_data[i];
+ if (is_nested(child.type->id())) {
+ ARROW_ASSIGN_OR_RAISE(auto child_hashes,
+ HashChild(array, child, hash_ctx,
memory_pool));
+ children.push_back(child_hashes);
+ ARROW_ASSIGN_OR_RAISE(columns[i], ToColumnArray(*child_hashes,
hash_ctx));
+ } else {
+ ARROW_ASSIGN_OR_RAISE(columns[i], ToColumnArray(child, hash_ctx));
+ }
+ }
+ } else if (is_list_like(type_id)) {
+ auto values = array.child_data[0];
+ ARROW_ASSIGN_OR_RAISE(auto value_hashes,
+ HashChild(array, values, hash_ctx, memory_pool));
+ children.push_back(value_hashes);
+ ARROW_ASSIGN_OR_RAISE(
+ columns[0], ToColumnArray(array, hash_ctx,
value_hashes->buffers[1]->data()));
+ } else {
+ ARROW_ASSIGN_OR_RAISE(columns[0], ToColumnArray(array, hash_ctx));
+ }
+ Hashing64::HashMultiColumn(columns, hash_ctx, out);
+ return Status::OK();
+ }
+
+ static Status Exec(KernelContext* ctx, const ExecSpan& input_arg,
ExecResult* out) {
+ if (input_arg.num_values() != 1 || !input_arg[0].is_array()) {
+ return Status::Invalid("FastHash currently supports a single array
input");
+ }
+ ArraySpan hash_input = input_arg[0].array;
+
+ auto exec_ctx = default_exec_context();
+ if (ctx && ctx->exec_context()) {
+ exec_ctx = ctx->exec_context();
+ }
+
+ // Initialize stack-based memory allocator used by Hashing32 and Hashing64
+ util::TempVectorStack stack_memallocator;
+ ARROW_RETURN_NOT_OK(
+ stack_memallocator.Init(exec_ctx->memory_pool(),
+ 3 * sizeof(int32_t) *
util::MiniBatch::kMiniBatchLength));
+
+ // Prepare context used by Hashing32 and Hashing64
+ LightContext hash_ctx;
+ hash_ctx.hardware_flags = exec_ctx->cpu_info()->hardware_flags();
+ hash_ctx.stack = &stack_memallocator;
+
+ // Call the hashing function, overloaded based on OutputCType
+ ArraySpan* result_span = out->array_span_mutable();
+ uint64_t* result_ptr = result_span->GetValues<uint64_t>(1);
+ ARROW_RETURN_NOT_OK(
+ HashArray(hash_input, &hash_ctx, exec_ctx->memory_pool(), result_ptr));
+
+ return Status::OK();
+ }
+};
+
+// ------------------------------
+// Function construction and kernel registration
+std::shared_ptr<ScalarFunction> RegisterKernelsFastHash64() {
+ // Create function instance
+ auto fn_hash_64 =
+ std::make_shared<ScalarFunction>("hash_64", Arity::Unary(), hash_64_doc);
+
+ // Add 64-bit hash kernel
+ ScalarKernel kernel({InputType()}, OutputType(uint64()),
FastHashScalar::Exec);
+ kernel.null_handling = NullHandling::OUTPUT_NOT_NULL;
Review Comment:
Is this correct? The function doc says this function emits null for null
input, so this should be `NullHandling::INTERSECTION` (the default)?
##########
docs/source/cpp/compute.rst:
##########
@@ -1231,6 +1231,26 @@ Containment tests
* \(8) Output is true iff :member:`MatchSubstringOptions::pattern`
matches the corresponding input element at any position.
+
+Hash Functions
+~~~~~~~~~~~~~~
+
+Not to be confused with the "group by" functions, Hash functions produce an
array of hash
+values corresponding to the length of the input. Currently, these functions
take a single
+array as input.
+
++-----------------------+-------+-----------------------------------+-------------+---------------+-------+
+| Function name | Arity | Input types | Output
type | Options class | Notes |
++=======================+=======+===================================+=============+===============+=======+
+| hash_64 | Unary | Any | UInt64
| | \(1) |
++-----------------------+-------+-----------------------------------+-------------+---------------+-------+
Review Comment:
```suggestion
+---------------+-------+-------------+-------------+---------------+-------+
| Function name | Arity | Input types | Output type | Options class | Notes |
+===============+=======+=============+=============+===============+=======+
| hash_64 | Unary | Any | UInt64 | | \(1) |
+---------------+-------+-------------+-------------+---------------+-------+
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]