parthchandra commented on code in PR #3804:
URL: https://github.com/apache/datafusion-comet/pull/3804#discussion_r3061432155


##########
native/spark-expr/src/datetime_funcs/hours.rs:
##########
@@ -0,0 +1,274 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Spark-compatible `hours` V2 partition transform.
+//!
+//! Computes the number of hours since the Unix epoch (1970-01-01 00:00:00 
UTC).
+//!
+//! Both `TimestampType` and `TimestampNTZType` are computationally identical. 
They
+//! extract the absolute hours since the epoch by directly dividing the 
microsecond
+//! value by the number of microseconds in an hour, ignoring session timezone 
offsets.
+
+use arrow::array::cast::as_primitive_array;
+use arrow::array::types::TimestampMicrosecondType;
+use arrow::array::{Array, Int32Array};
+use arrow::datatypes::{DataType, TimeUnit::Microsecond};
+use datafusion::common::{internal_datafusion_err, DataFusionError};
+use datafusion::logical_expr::{
+    ColumnarValue, ScalarFunctionArgs, ScalarUDFImpl, Signature, Volatility,
+};
+use std::{any::Any, fmt::Debug, sync::Arc};
+
+const MICROS_PER_HOUR: i64 = 3_600_000_000;
+
+#[derive(Debug, PartialEq, Eq, Hash)]
+pub struct SparkHoursTransform {
+    signature: Signature,
+}
+
+impl SparkHoursTransform {
+    pub fn new() -> Self {
+        Self {
+            signature: Signature::user_defined(Volatility::Immutable),
+        }
+    }
+}
+
+impl ScalarUDFImpl for SparkHoursTransform {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn name(&self) -> &str {
+        "hours_transform"
+    }
+
+    fn signature(&self) -> &Signature {
+        &self.signature
+    }
+
+    fn return_type(&self, _arg_types: &[DataType]) -> 
datafusion::common::Result<DataType> {
+        Ok(DataType::Int32)
+    }
+
+    fn invoke_with_args(
+        &self,
+        args: ScalarFunctionArgs,
+    ) -> datafusion::common::Result<ColumnarValue> {
+        let args: [ColumnarValue; 1] = args.args.try_into().map_err(|_| {
+            internal_datafusion_err!("hours_transform expects exactly one 
argument")
+        })?;
+
+        match args {
+            [ColumnarValue::Array(array)] => {
+                let ts_array = 
as_primitive_array::<TimestampMicrosecondType>(&array);
+                let result: Int32Array = match array.data_type() {
+                    DataType::Timestamp(Microsecond, _) => {
+                        arrow::compute::kernels::arity::unary(ts_array, 
|micros| {
+                            micros.div_euclid(MICROS_PER_HOUR) as i32

Review Comment:
   Why `div_euclid`? Elsewhere the code generally uses `div_floor` — consider using it here for consistency.



##########
spark/src/main/scala/org/apache/comet/serde/datetime.scala:
##########
@@ -589,6 +589,36 @@ object CometDateFormat extends 
CometExpressionSerde[DateFormatClass] {
   }
 }
 
+/**
+ * Converts a timestamp to the number of hours since Unix epoch (1970-01-01 
00:00:00 UTC). This is
+ * a V2 partition transform expression.
+ *
+ * Both TimestampType and TimestampNTZType use direct division of the raw 
microsecond value
+ * without applying any session timezone offset.
+ */
+object CometHours extends CometExpressionSerde[Hours] {
+  override def convert(
+      expr: Hours,
+      inputs: Seq[Attribute],
+      binding: Boolean): Option[ExprOuterClass.Expr] = {
+    val childExpr = exprToProtoInternal(expr.child, inputs, binding)
+
+    if (childExpr.isDefined) {

Review Comment:
   It might be better to explicitly check the child expression's data type, only
allowing valid types and falling back to Spark otherwise.
   See `CometDays` below for an example.



##########
native/spark-expr/src/datetime_funcs/hours.rs:
##########
@@ -0,0 +1,274 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+//! Spark-compatible `hours` V2 partition transform.
+//!
+//! Computes the number of hours since the Unix epoch (1970-01-01 00:00:00 
UTC).
+//!
+//! Both `TimestampType` and `TimestampNTZType` are computationally identical. 
They
+//! extract the absolute hours since the epoch by directly dividing the 
microsecond
+//! value by the number of microseconds in an hour, ignoring session timezone 
offsets.
+
+use arrow::array::cast::as_primitive_array;
+use arrow::array::types::TimestampMicrosecondType;
+use arrow::array::{Array, Int32Array};
+use arrow::datatypes::{DataType, TimeUnit::Microsecond};
+use datafusion::common::{internal_datafusion_err, DataFusionError};
+use datafusion::logical_expr::{
+    ColumnarValue, ScalarFunctionArgs, ScalarUDFImpl, Signature, Volatility,
+};
+use std::{any::Any, fmt::Debug, sync::Arc};
+
+const MICROS_PER_HOUR: i64 = 3_600_000_000;
+
+#[derive(Debug, PartialEq, Eq, Hash)]
+pub struct SparkHoursTransform {
+    signature: Signature,
+}
+
+impl SparkHoursTransform {
+    pub fn new() -> Self {
+        Self {
+            signature: Signature::user_defined(Volatility::Immutable),
+        }
+    }
+}
+
+impl ScalarUDFImpl for SparkHoursTransform {
+    fn as_any(&self) -> &dyn Any {
+        self
+    }
+
+    fn name(&self) -> &str {
+        "hours_transform"
+    }
+
+    fn signature(&self) -> &Signature {
+        &self.signature
+    }
+
+    fn return_type(&self, _arg_types: &[DataType]) -> 
datafusion::common::Result<DataType> {
+        Ok(DataType::Int32)
+    }
+
+    fn invoke_with_args(
+        &self,
+        args: ScalarFunctionArgs,
+    ) -> datafusion::common::Result<ColumnarValue> {
+        let args: [ColumnarValue; 1] = args.args.try_into().map_err(|_| {
+            internal_datafusion_err!("hours_transform expects exactly one 
argument")
+        })?;
+
+        match args {
+            [ColumnarValue::Array(array)] => {
+                let ts_array = 
as_primitive_array::<TimestampMicrosecondType>(&array);

Review Comment:
   This cast should be moved inside the `match` on `array.data_type()`, into the
`DataType::Timestamp(Microsecond, _)` arm. As written, it would panic for any other
input type.



##########
spark/src/test/scala/org/apache/comet/CometTemporalExpressionSuite.scala:
##########
@@ -489,4 +489,58 @@ class CometTemporalExpressionSuite extends CometTestBase 
with AdaptiveSparkPlanH
         dummyDF.selectExpr("unix_date(cast(NULL as date))"))
     }
   }
+
+  /**
+   * Checks that the Comet-evaluated DataFrame produces the same results as 
the baseline DataFrame
+   * evaluated by native Spark JVM, and that Comet native operators are used. 
This is needed
+   * because Hours is a PartitionTransformExpression that extends Unevaluable.
+   */
+  private def checkHours(cometDF: DataFrame, baselineDF: DataFrame): Unit = {
+    // Ensure the expected answer is evaluated solely by native Spark JVM 
(Comet off)
+    var expected: Array[Row] = Array.empty
+    withSQLConf(CometConf.COMET_ENABLED.key -> "false") {
+      expected = baselineDF.collect()
+    }
+    checkAnswer(cometDF, expected.toSeq)
+    checkCometOperators(stripAQEPlan(cometDF.queryExecution.executedPlan))
+  }
+
+  test("hours - timestamp input") {
+    import org.apache.spark.sql.catalyst.expressions.Hours
+    val r = new Random(42)
+    val tsSchema = StructType(Seq(StructField("ts", DataTypes.TimestampType, 
true)))
+    val tsDF = FuzzDataGenerator.generateDataFrame(r, spark, tsSchema, 1000, 
DataGenOptions())
+
+    for (timezone <- Seq("UTC", "America/Los_Angeles", "Asia/Tokyo")) {
+      withSQLConf(SQLConf.SESSION_LOCAL_TIMEZONE.key -> timezone) {
+        checkHours(
+          tsDF.select(col("ts"), 
getColumnFromExpression(Hours(UnresolvedAttribute("ts")))),
+          tsDF.selectExpr("ts", "cast(floor(unix_micros(ts) / 3600000000D) as 
int)"))
+      }
+    }
+  }
+
+  test("hours - timestamp_ntz input") {
+    import org.apache.spark.sql.catalyst.expressions.Hours
+    val r = new Random(42)
+    val ntzSchema = StructType(Seq(StructField("ts", 
DataTypes.TimestampNTZType, true)))
+    val ntzDF = FuzzDataGenerator.generateDataFrame(r, spark, ntzSchema, 1000, 
DataGenOptions())
+
+    val _spark = spark
+    import _spark.implicits._
+    val expectedDF = ntzDF
+      .map { row =>
+        val ts = row.getAs[java.time.LocalDateTime]("ts")
+        val micros = if (ts != null) {
+          
org.apache.spark.sql.catalyst.util.DateTimeUtils.localDateTimeToMicros(ts)
+        } else 0L // assuming safe non-null

Review Comment:
   If the generator produces a `null` timestamp, `hours` should return `null`, but
this code will return `0` instead.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to