comphead commented on code in PR #18569:
URL: https://github.com/apache/datafusion/pull/18569#discussion_r2627929878
##########
datafusion/spark/src/function/aggregate/try_sum.rs:
##########
@@ -0,0 +1,650 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+use arrow::array::{ArrayRef, ArrowNumericType, AsArray, BooleanArray, PrimitiveArray};
+use arrow::datatypes::{
+    DECIMAL128_MAX_PRECISION, DataType, Decimal128Type, Field, FieldRef, Float64Type,
+    Int64Type, UInt64Type,
+};
+use datafusion_common::{Result, ScalarValue, downcast_value, exec_err, not_impl_err};
+use datafusion_expr::function::{AccumulatorArgs, StateFieldsArgs};
+use datafusion_expr::utils::format_state_name;
+use datafusion_expr::{Accumulator, AggregateUDFImpl, Signature, Volatility};
+use std::any::Any;
+use std::fmt::{Debug, Formatter};
+use std::mem::size_of_val;
+
+#[derive(PartialEq, Eq, Hash)]
+pub struct SparkTrySum {
+    signature: Signature,
+}
+
+impl Default for SparkTrySum {
+    fn default() -> Self {
+        Self::new()
+    }
+}
+
+impl SparkTrySum {
+    pub fn new() -> Self {
+        Self {
+            signature: Signature::user_defined(Volatility::Immutable),
+        }
+    }
+}
+
+impl Debug for SparkTrySum {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        f.debug_struct("SparkTrySum")
+            .field("signature", &self.signature)
+            .finish()
+    }
+}
+
+/// Accumulator for try_sum that detects overflow
+struct TrySumAccumulator<T: ArrowNumericType> {
+    sum: Option<T::Native>,
+    data_type: DataType,
+    failed: bool,
+    // Only used if data_type is Decimal128(p, s)
+    dec_precision: Option<u8>,
+}
+
+impl<T: ArrowNumericType> Debug for TrySumAccumulator<T> {
+    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
+        write!(f, "TrySumAccumulator({})", self.data_type)
+    }
+}
+
+impl<T: ArrowNumericType> TrySumAccumulator<T> {
+    fn new(data_type: DataType) -> Self {
+        let dec_precision = match &data_type {
+            DataType::Decimal128(p, _) => Some(*p),
+            _ => None,
+        };
+        Self {
+            sum: None,
+            data_type,
+            failed: false,
+            dec_precision,
+        }
+    }
+}
+
+impl<T: ArrowNumericType> Accumulator for TrySumAccumulator<T> {
+    fn state(&mut self) -> Result<Vec<ScalarValue>> {
+        Ok(vec![
+            self.evaluate()?,
+            ScalarValue::Boolean(Some(self.failed)),
+        ])
+    }
+
+    fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
+        update_batch_internal(self, values)
+    }
+
+    fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
+        // Check if any partition has failed
+        let failed_arr = downcast_value!(states[1], BooleanArray);
+        for failed in failed_arr.iter().flatten() {
+            if failed {
+                self.failed = true;
+                return Ok(());
+            }
+        }
+
+        // Merge the sum values using the same logic as update_batch
+        update_batch_internal(self, states)
+    }
+
+    fn evaluate(&mut self) -> Result<ScalarValue> {
+        evaluate_internal(self)
+    }
+
+    fn size(&self) -> usize {
+        size_of_val(self)
+    }
+}
+
+// Specialized implementations for update_batch for each type
+
+fn update_batch_internal<T: ArrowNumericType>(
+    acc: &mut TrySumAccumulator<T>,
+    values: &[ArrayRef],
+) -> Result<()> {
+    if values.is_empty() || acc.failed {
+        return Ok(());
+    }
+
+    let array = values[0].as_primitive::<T>();
+
+    // Specialize based on the type
+    if std::any::TypeId::of::<T>() == std::any::TypeId::of::<Int64Type>() {
+        update_int64(acc, array)
+    } else if std::any::TypeId::of::<T>() == std::any::TypeId::of::<Float64Type>() {
+        update_float64(acc, array)
+    } else if std::any::TypeId::of::<T>() == std::any::TypeId::of::<Decimal128Type>() {
+        update_decimal128(acc, array)
+    } else {
+        exec_err!(
+            "try_sum: unsupported type in update_batch: {:?}",
+            acc.data_type
+        )
+    }
+}
+
+fn update_int64<T: ArrowNumericType>(
+    acc: &mut TrySumAccumulator<T>,
+    array: &PrimitiveArray<T>,
+) -> Result<()> {
+    for v in array.iter().flatten() {
+        // Cast to i64 for checked_add
+        let v_i64 = unsafe { std::mem::transmute_copy::<T::Native, i64>(&v) };
+        let sum_i64 = acc
+            .sum
+            .map(|s| unsafe { std::mem::transmute_copy::<T::Native, i64>(&s) });
+
+        let new_sum = match sum_i64 {
+            None => v_i64,
+            Some(s) => match s.checked_add(v_i64) {
+                Some(result) => result,
+                None => {
+                    acc.failed = true;
+                    return Ok(());
+                }
+            },
+        };
+
+        acc.sum = Some(unsafe { std::mem::transmute_copy::<i64, T::Native>(&new_sum) });
+    }
+    Ok(())
+}
+
+fn update_float64<T: ArrowNumericType>(
+    acc: &mut TrySumAccumulator<T>,
+    array: &PrimitiveArray<T>,
+) -> Result<()> {
+    for v in array.iter().flatten() {
+        let v_f64 = unsafe { std::mem::transmute_copy::<T::Native, f64>(&v) };
+        let sum_f64 = acc
+            .sum
+            .map(|s| unsafe { std::mem::transmute_copy::<T::Native, f64>(&s) })
+            .unwrap_or(0.0);
+        let new_sum = sum_f64 + v_f64;
+        acc.sum = Some(unsafe { std::mem::transmute_copy::<f64, T::Native>(&new_sum) });
+    }
+    Ok(())
+}
+
+fn update_decimal128<T: ArrowNumericType>(
+    acc: &mut TrySumAccumulator<T>,
+    array: &PrimitiveArray<T>,
+) -> Result<()> {
+    let precision = acc.dec_precision.unwrap_or(38);
+
+    for v in array.iter().flatten() {
+        let v_i128 = unsafe { std::mem::transmute_copy::<T::Native, i128>(&v) };
+        let sum_i128 = acc
+            .sum
+            .map(|s| unsafe { std::mem::transmute_copy::<T::Native, i128>(&s) });
+
+        let new_sum = match sum_i128 {
+            None => v_i128,
+            Some(s) => match s.checked_add(v_i128) {
+                Some(result) => result,
+                None => {
+                    acc.failed = true;
+                    return Ok(());
+                }
+            },
+        };
+
+        if exceeds_decimal128_precision(new_sum, precision) {
+            acc.failed = true;
+            return Ok(());
+        }
+
+        acc.sum = Some(unsafe { std::mem::transmute_copy::<i128, T::Native>(&new_sum) });
+    }
+    Ok(())
+}
+
+fn evaluate_internal<T: ArrowNumericType>(
+    acc: &mut TrySumAccumulator<T>,
+) -> Result<ScalarValue> {
+    if acc.failed {
+        return ScalarValue::new_primitive::<T>(None, &acc.data_type);
+    }
+    ScalarValue::new_primitive::<T>(acc.sum, &acc.data_type)
+}
+
+// Helpers to determine if it exceeds decimal precision
+fn pow10_i128(p: u8) -> Option<i128> {
+    let mut v: i128 = 1;
+    for _ in 0..p {
+        v = v.checked_mul(10)?;
+    }
+    Some(v)
+}
+
+fn exceeds_decimal128_precision(sum: i128, p: u8) -> bool {
+    if let Some(max_plus_one) = pow10_i128(p) {
+        let max = max_plus_one - 1;
+        sum > max || sum < -max
+    } else {
+        true
+    }
+}
+
+impl AggregateUDFImpl for SparkTrySum {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn name(&self) -> &str {
+        "try_sum"
+    }
+
+    fn signature(&self) -> &Signature {
+        &self.signature
+    }
+
+    fn return_type(&self, arg_types: &[DataType]) -> Result<DataType> {
+        use DataType::*;
+
+        let dt = &arg_types[0];
+        let result_type = match dt {
+            Null => Float64,
+            Decimal128(p, s) => {
+                let new_precision = DECIMAL128_MAX_PRECISION.min(p + 10);
+                Decimal128(new_precision, *s)
+            }
+            Int8 | Int16 | Int32 | Int64 | UInt8 | UInt16 | UInt32 | UInt64 => Int64,
+            Float16 | Float32 | Float64 => Float64,
+
+            other => return exec_err!("try_sum: unsupported type: {other:?}"),
+        };
+
+        Ok(result_type)
+    }
+
+    fn accumulator(&self, acc_args: AccumulatorArgs) -> Result<Box<dyn Accumulator>> {
+        macro_rules! helper {
+            ($t:ty, $dt:expr) => {
+                Ok(Box::new(TrySumAccumulator::<$t>::new($dt.clone())))
+            };
+        }
+
+        match acc_args.return_field.data_type() {
+            DataType::Int64 => helper!(Int64Type, acc_args.return_field.data_type()),
+            DataType::UInt64 => helper!(UInt64Type, acc_args.return_field.data_type()),

Review Comment:
   Spark doesn't use UInt if i'm not mistaken
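For readers following along, here is a minimal, self-contained sketch (not code from the PR) of the result-type mapping that `return_type` in the diff above implements. It illustrates the reviewer's point: every unsigned Arrow integer input is already widened to `Int64`, mirroring Spark SQL, which has no unsigned integral types, so an accumulator arm keyed on a `UInt64` return type would likely never be reached. The helper name `try_sum_result_type` and the `main` function are invented for illustration only, assuming the `arrow` crate is available as a dependency.

use arrow::datatypes::{DataType, DECIMAL128_MAX_PRECISION};

// Hypothetical helper mirroring the `return_type` mapping shown in the diff.
fn try_sum_result_type(dt: &DataType) -> Option<DataType> {
    use DataType::*;
    match dt {
        Null => Some(Float64),
        // Sums of decimals get 10 extra digits of precision, capped at 38.
        Decimal128(p, s) => Some(Decimal128(DECIMAL128_MAX_PRECISION.min(p + 10), *s)),
        // Signed *and* unsigned integers all widen to Int64 (Spark's LongType).
        Int8 | Int16 | Int32 | Int64 | UInt8 | UInt16 | UInt32 | UInt64 => Some(Int64),
        Float16 | Float32 | Float64 => Some(Float64),
        _ => None,
    }
}

fn main() {
    // Because unsigned inputs widen to Int64, the accumulator only ever needs
    // Int64, Float64, or Decimal128 specializations.
    assert_eq!(try_sum_result_type(&DataType::UInt64), Some(DataType::Int64));
    assert_eq!(try_sum_result_type(&DataType::Float32), Some(DataType::Float64));
    assert_eq!(
        try_sum_result_type(&DataType::Decimal128(10, 2)),
        Some(DataType::Decimal128(20, 2))
    );
}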
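And a second standalone sketch (again, not part of the PR) of the overflow-handling technique that `update_int64` uses: accumulate with `checked_add` and treat any overflow as a latched failure, so the final answer comes back as NULL (modeled here as `None`) rather than an error or a wrapped value, which is the behavior the accumulator's `failed` flag implements. The function name `try_sum_i64` is hypothetical; only the standard library is used.

// Standalone illustration of checked summation with overflow latching.
fn try_sum_i64(values: &[Option<i64>]) -> Option<i64> {
    let mut sum: Option<i64> = None;
    for v in values.iter().flatten() {
        sum = match sum {
            None => Some(*v),
            // Any overflow poisons the whole aggregation.
            Some(s) => match s.checked_add(*v) {
                Some(total) => Some(total),
                None => return None,
            },
        };
    }
    sum
}

fn main() {
    assert_eq!(try_sum_i64(&[Some(1), None, Some(2)]), Some(3));
    // Overflow: the result is NULL rather than an error or a wrapped value.
    assert_eq!(try_sum_i64(&[Some(i64::MAX), Some(1)]), None);
    // No non-NULL input rows: NULL, like a sum over an empty/all-NULL column.
    assert_eq!(try_sum_i64(&[None, None]), None);
}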
