This is an automated email from the ASF dual-hosted git repository. yao pushed a commit to branch master in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push: new 3b15ee86bd29 [SPARK-52670][SQL] Make HiveResult work with UserDefinedType#stringifyValue 3b15ee86bd29 is described below commit 3b15ee86bd2984cbf34bd509382440169b084ba4 Author: Kent Yao <y...@apache.org> AuthorDate: Fri Jul 4 10:03:20 2025 +0800 [SPARK-52670][SQL] Make HiveResult work with UserDefinedType#stringifyValue ### What changes were proposed in this pull request? Make HiveResult work with UserDefinedType#stringifyValue ### Why are the changes needed? If the `toString` of the underlying class of a UDT is not well-defined, the JDBC/thrift side might not be able to get a meaningful value. ### Does this PR introduce _any_ user-facing change? No ### How was this patch tested? new tests ### Was this patch authored or co-authored using generative AI tooling? no Closes #51358 from yaooqinn/SPARK-52670. Authored-by: Kent Yao <y...@apache.org> Signed-off-by: Kent Yao <y...@apache.org> --- .../org/apache/spark/sql/execution/HiveResult.scala | 2 +- .../apache/spark/sql/execution/HiveResultSuite.scala | 20 +++++++++++++++++--- 2 files changed, 18 insertions(+), 4 deletions(-) diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala index 21cf70dab59f..3a7b75a555af 100644 --- a/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala +++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/HiveResult.scala @@ -149,6 +149,6 @@ object HiveResult extends SQLConfHelper { startField, endField) case (v: VariantVal, VariantType) => v.toString - case (other, _: UserDefinedType[_]) => other.toString + case (other, u: UserDefinedType[_]) => u.stringifyValue(other) } } diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/HiveResultSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/HiveResultSuite.scala index 13c246660e86..ae5b303116de 100644 --- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/HiveResultSuite.scala +++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/HiveResultSuite.scala @@ -17,15 +17,15 @@ package org.apache.spark.sql.execution -import java.time.{Duration, Period} +import java.time.{Duration, Period, Year} +import org.apache.spark.sql.YearUDT import org.apache.spark.sql.catalyst.util.DateTimeTestUtils import org.apache.spark.sql.connector.catalog.InMemoryTableCatalog import org.apache.spark.sql.execution.HiveResult._ import org.apache.spark.sql.internal.SQLConf import org.apache.spark.sql.test.{ExamplePoint, ExamplePointUDT, SharedSparkSession} -import org.apache.spark.sql.types.{YearMonthIntervalType => YM} -import org.apache.spark.sql.types.YearMonthIntervalType +import org.apache.spark.sql.types.{YearMonthIntervalType, YearMonthIntervalType => YM} class HiveResultSuite extends SharedSparkSession { @@ -172,4 +172,18 @@ class HiveResultSuite extends SharedSparkSession { val plan2 = df.selectExpr("array(i)").queryExecution.executedPlan assert(hiveResultString(plan2) === Seq("[5 00:00:00.010000000]")) } + + test("SPARK-52670: Use stringifyValue to get UDT string representation") { + val year = Year.of(18) + val tpe = new YearUDT() + assert(toHiveString((year, tpe), + nested = false, getTimeFormatters, getBinaryFormatter) === "18") + val tpe2 = new YearUDT() { + override def stringifyValue(obj: Any): String = { + f"${obj.asInstanceOf[Year].getValue}%04d" + } + } + assert(toHiveString((year, tpe2), + nested = false, getTimeFormatters, getBinaryFormatter) === "0018") + } } --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org For additional commands, e-mail: commits-h...@spark.apache.org