vibhatha commented on code in PR #12590: URL: https://github.com/apache/arrow/pull/12590#discussion_r841716415
########## cpp/src/arrow/python/udf.cc: ########## @@ -0,0 +1,128 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. + +#include "arrow/python/udf.h" + +#include <cstddef> +#include <memory> +#include <sstream> + +#include "arrow/compute/function.h" +#include "arrow/python/common.h" + +namespace arrow { + +namespace py { + +Status VerifyArityAndInput(compute::Arity arity, const compute::ExecBatch& batch) { + bool match = static_cast<uint64_t>(arity.num_args) == batch.values.size(); + if (!match) { + return Status::Invalid("Function Arity and Input data shape doesn't match, expected ", + arity.num_args, ", got ", batch.values.size()); + } + return Status::OK(); +} + +Status ExecFunctionScalar(const compute::ExecBatch& batch, PyObject* function, + int num_args, Datum* out) { + std::shared_ptr<Scalar> c_res_data; + PyObject* arg_tuple = PyTuple_New(num_args); + for (int arg_id = 0; arg_id < num_args; arg_id++) { + if (!batch[arg_id].is_scalar()) { + return Status::Invalid("Input type and data type doesn't match"); + } + auto c_data = batch[arg_id].scalar(); + PyObject* data = wrap_scalar(c_data); + PyTuple_SetItem(arg_tuple, arg_id, data); + } + PyObject* result = PyObject_CallObject(function, arg_tuple); + if (result 
== NULL) { + return Status::ExecutionError("Output is null, but expected a scalar"); + } + if (!is_scalar(result)) { + return Status::Invalid("Output from function is not a scalar"); + } + ARROW_ASSIGN_OR_RAISE(auto unwrapped_result, unwrap_scalar(result)); + *out = unwrapped_result; + return Status::OK(); +} + +Status ExecFunctionArray(const compute::ExecBatch& batch, PyObject* function, + int num_args, Datum* out) { + std::shared_ptr<Array> c_res_data; + PyObject* arg_tuple = PyTuple_New(num_args); + for (int arg_id = 0; arg_id < num_args; arg_id++) { + if (!batch[arg_id].is_array()) { + return Status::Invalid("Input type and data type doesn't match"); + } + auto c_data = batch[arg_id].make_array(); + PyObject* data = wrap_array(c_data); + PyTuple_SetItem(arg_tuple, arg_id, data); + } + PyObject* result = PyObject_CallObject(function, arg_tuple); + if (result == NULL) { + return Status::ExecutionError("Output is null, but expected an array"); + } + if (!is_array(result)) { + return Status::Invalid("Output from function is not an array"); + } + return unwrap_array(result).Value(out); +} + +Status ScalarUdfBuilder::MakeFunction(PyObject* function, ScalarUdfOptions* options) { + if (function == NULL) { + return Status::ExecutionError("python function cannot be null"); + } + Py_INCREF(function); + function_.reset(function); + if (!PyCallable_Check(function_.obj())) { + return Status::TypeError("Expected a callable python object."); + } + auto doc = options->doc(); + auto arity = options->arity(); + scalar_func_ = std::make_shared<compute::ScalarFunction>(options->name(), arity, doc); + + // lambda function + auto exec = [this, arity](compute::KernelContext* ctx, const compute::ExecBatch& batch, Review Comment: Hmm... -- This is an automated message from the Apache Git Service. To respond to the message, please log on to GitHub and use the URL above to go to the specific comment. 
To unsubscribe, e-mail: github-unsubscribe@arrow.apache.org For queries about this service, please contact Infrastructure at: users@infra.apache.org