huaxingao commented on code in PR #348:
URL: https://github.com/apache/datafusion-comet/pull/348#discussion_r1590463378
########## spark/src/test/resources/tpcds-query-results/v1_4/q39a.sql.out: ##########

@@ -31,7 +31,7 @@ struct<w_warehouse_sk:int,i_item_sk:int,d_moy:int,mean:double,cov:double,w_warehouse_sk:int,i_item_sk:int,d_moy:int,mean:double,cov:double>
 1	12259	1	326.5	1.219693210219279	1	12259	2	292.6666666666667	1.2808898286830026
 1	12641	1	321.25	1.1286221893301993	1	12641	2	279.25	1.129134558577743
 1	13043	1	260.5	1.355894484625015	1	13043	2	295.0	1.056210118409035
-1	13157	1	260.5	1.5242630430075292	1	13157	2	413.5	1.0422561797285326
+1	13157	1	260.5	1.524263043007529	1	13157	2	413.5	1.0422561797285326

Review Comment:
   Yes, we are currently just comparing the text file output directly, using Spark's TPCDSQuerySuite. There doesn't seem to be a way to generate the output in a structured file format.
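   To make the flakiness concrete: the two cov values in the diff above differ only in the last printed digit, i.e. by roughly one ULP, so an exact text diff flags them even though they are numerically equivalent for practical purposes. A minimal, hypothetical sketch of the relative-tolerance check that a structured output format would enable (approx_eq is made up for illustration and is not part of TPCDSQuerySuite):

    // Hypothetical helper, not part of this PR or of TPCDSQuerySuite:
    // compares two doubles with a relative tolerance instead of exact text.
    fn approx_eq(a: f64, b: f64, rel_tol: f64) -> bool {
        (a - b).abs() <= rel_tol * a.abs().max(b.abs())
    }

    fn main() {
        // The two cov values from the q39a.sql.out diff above; they differ
        // by about one ULP, which a small relative tolerance absorbs.
        let before: f64 = "1.5242630430075292".parse().unwrap();
        let after: f64 = "1.524263043007529".parse().unwrap();
        assert!(approx_eq(before, after, 1e-12));
    }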
########## core/src/execution/datafusion/expressions/stddev.rs: ##########

@@ -0,0 +1,185 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Defines physical expressions that can be evaluated at runtime during query execution
+
+use std::{any::Any, sync::Arc};
+
+use crate::execution::datafusion::expressions::{
+    stats::StatsType, utils::down_cast_any_ref, variance::VarianceAccumulator,
+};
+use arrow::{
+    array::ArrayRef,
+    datatypes::{DataType, Field},
+};
+use datafusion::logical_expr::Accumulator;
+use datafusion_common::{internal_err, Result, ScalarValue};
+use datafusion_physical_expr::{expressions::format_state_name, AggregateExpr, PhysicalExpr};
+
+/// STDDEV and STDDEV_SAMP (standard deviation) aggregate expression
+/// The implementation is mostly the same as DataFusion's. The reason we have our
+/// own implementation is that DataFusion uses UInt64 for the `count` state field,
+/// while Spark uses Double. We have also added `null_on_divide_by_zero` to be
+/// consistent with Spark's implementation.
+#[derive(Debug)]
+pub struct Stddev {
+    name: String,
+    expr: Arc<dyn PhysicalExpr>,
+    stats_type: StatsType,
+    null_on_divide_by_zero: bool,
+}
+
+impl Stddev {
+    /// Create a new STDDEV aggregate function
+    pub fn new(
+        expr: Arc<dyn PhysicalExpr>,
+        name: impl Into<String>,
+        data_type: DataType,
+        stats_type: StatsType,
+        null_on_divide_by_zero: bool,
+    ) -> Self {
+        // The result type of stddev is Float64 only.
+        assert!(matches!(data_type, DataType::Float64));
+        Self {
+            name: name.into(),
+            expr,
+            stats_type,
+            null_on_divide_by_zero,
+        }
+    }
+}
+
+impl AggregateExpr for Stddev {
+    /// Return a reference to Any that can be used for downcasting
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn field(&self) -> Result<Field> {
+        Ok(Field::new(&self.name, DataType::Float64, true))
+    }
+
+    fn create_accumulator(&self) -> Result<Box<dyn Accumulator>> {
+        Ok(Box::new(StddevAccumulator::try_new(
+            self.stats_type,
+            self.null_on_divide_by_zero,
+        )?))
+    }
+
+    fn create_sliding_accumulator(&self) -> Result<Box<dyn Accumulator>> {
+        Ok(Box::new(StddevAccumulator::try_new(
+            self.stats_type,
+            self.null_on_divide_by_zero,
+        )?))
+    }
+
+    fn state_fields(&self) -> Result<Vec<Field>> {
+        Ok(vec![
+            Field::new(
+                format_state_name(&self.name, "count"),
+                DataType::Float64,
+                true,
+            ),
+            Field::new(
+                format_state_name(&self.name, "mean"),
+                DataType::Float64,
+                true,
+            ),
+            Field::new(format_state_name(&self.name, "m2"), DataType::Float64, true),
+        ])
+    }
+
+    fn expressions(&self) -> Vec<Arc<dyn PhysicalExpr>> {
+        vec![self.expr.clone()]
+    }
+
+    fn name(&self) -> &str {
+        &self.name
+    }
+}
+
+impl PartialEq<dyn Any> for Stddev {
+    fn eq(&self, other: &dyn Any) -> bool {
+        down_cast_any_ref(other)
+            .downcast_ref::<Self>()
+            .map(|x| {
+                self.name == x.name
+                    && self.expr.eq(&x.expr)
+                    && self.null_on_divide_by_zero == x.null_on_divide_by_zero

Review Comment:
   Yes. Fixed. Thanks
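   For context on what `null_on_divide_by_zero` buys us: for the sample statistic the divisor is count - 1, which is zero when only one row was seen. A standalone sketch of the two behaviors (hypothetical, mirroring the count/m2 state fields above but not the PR's actual VarianceAccumulator code):

    // Hypothetical illustration of `null_on_divide_by_zero` semantics;
    // not the code path used by StddevAccumulator/VarianceAccumulator.
    fn sample_stddev(count: f64, m2: f64, null_on_divide_by_zero: bool) -> Option<f64> {
        if count <= 1.0 {
            // The divisor (count - 1) would be zero.
            if null_on_divide_by_zero {
                None // Spark-compatible: stddev_samp over one row is NULL
            } else {
                Some(f64::NAN) // legacy behavior: propagate NaN
            }
        } else {
            Some((m2 / (count - 1.0)).sqrt())
        }
    }

    fn main() {
        assert_eq!(sample_stddev(1.0, 0.0, true), None);
        assert!(sample_stddev(1.0, 0.0, false).unwrap().is_nan());
    }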
########## core/src/execution/datafusion/expressions/stddev.rs: ##########

@@ -0,0 +1,185 @@
+            })
+            .unwrap_or(false)
+    }
+}
+
+/// An accumulator to compute the standard deviation
+#[derive(Debug)]
+pub struct StddevAccumulator {
+    variance: VarianceAccumulator,
+}
+
+impl StddevAccumulator {
+    /// Creates a new `StddevAccumulator`
+    pub fn try_new(s_type: StatsType, null_on_divide_by_zero: bool) -> Result<Self> {
+        Ok(Self {
+            variance: VarianceAccumulator::try_new(s_type, null_on_divide_by_zero)?,
+        })
+    }
+
+    pub fn get_m2(&self) -> f64 {
+        self.variance.get_m2()
+    }
+}
+
+impl Accumulator for StddevAccumulator {
+    fn state(&mut self) -> Result<Vec<ScalarValue>> {
+        Ok(vec![
+            ScalarValue::from(self.variance.get_count()),
+            ScalarValue::from(self.variance.get_mean()),
+            ScalarValue::from(self.variance.get_m2()),
+        ])
+    }
+
+    fn update_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
+        self.variance.update_batch(values)
+    }
+
+    fn retract_batch(&mut self, values: &[ArrayRef]) -> Result<()> {
+        self.variance.retract_batch(values)
+    }
+
+    fn merge_batch(&mut self, states: &[ArrayRef]) -> Result<()> {
+        self.variance.merge_batch(states)
+    }
+
+    fn evaluate(&mut self) -> Result<ScalarValue> {
+        let variance = self.variance.evaluate()?;
+        match variance {
+            ScalarValue::Float64(e) => {
+                if e.is_none() {
+                    Ok(ScalarValue::Float64(None))
+                } else {
+                    Ok(ScalarValue::Float64(e.map(|f| f.sqrt())))
+                }
+            }
+            _ => internal_err!("Variance should be f64"),
+        }

Review Comment:
   Changed. Thanks
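   Since the accumulator state is exactly (count, mean, m2), the whole computation reduces to Welford's online algorithm, with evaluate applying the final square root. A self-contained sketch over a made-up input (illustrative only, not the PR's code path):

    // Standalone Welford sketch mirroring the (count, mean, m2) state fields.
    fn main() {
        let (mut count, mut mean, mut m2) = (0.0_f64, 0.0_f64, 0.0_f64);
        for x in [2.0_f64, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0] {
            count += 1.0;
            let delta = x - mean;
            mean += delta / count;
            m2 += delta * (x - mean); // running sum of squared deviations
        }
        // evaluate() is the square root of the variance:
        let stddev_pop = (m2 / count).sqrt(); // STDDEV_POP  -> 2.0 for this input
        let stddev_samp = (m2 / (count - 1.0)).sqrt(); // STDDEV_SAMP -> ~2.138
        println!("pop = {stddev_pop}, samp = {stddev_samp}");
    }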
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at: [email protected]
