maropu commented on a change in pull request #30957:
URL: https://github.com/apache/spark/pull/30957#discussion_r570776893
##########
File path:
sql/core/src/test/scala/org/apache/spark/sql/execution/BaseScriptTransformationSuite.scala
##########
@@ -471,6 +473,126 @@ abstract class BaseScriptTransformationSuite extends
SparkPlanTest with SQLTestU
}
}
+ test("SPARK-31936: Script transform support ArrayType/MapType/StructType (no
serde)") {
+ assume(TestUtils.testCommandAvailable("python"))
+ withTempView("v") {
+ val df = Seq(
+ (Array(0, 1, 2), Array(Array(0, 1), Array(2)),
+ Map("a" -> 1), Map("b" -> Array("a", "b"))),
+ (Array(3, 4, 5), Array(Array(3, 4), Array(5)),
+ Map("b" -> 2), Map("c" -> Array("c", "d"))),
+ (Array(6, 7, 8), Array(Array(6, 7), Array(8)),
+ Map("c" -> 3), Map("d" -> Array("e", "f")))
+ ).toDF("a", "b", "c", "d")
+ .select('a, 'b, 'c, 'd,
+ struct('a, 'b).as("e"),
+ struct('a, 'd).as("f"),
+ struct(struct('a, 'b), struct('a, 'd)).as("g")
+ )
+
+ checkAnswer(
+ df,
+ (child: SparkPlan) => createScriptTransformationExec(
+ input = Seq(
+ df.col("a").expr,
+ df.col("b").expr,
+ df.col("c").expr,
+ df.col("d").expr,
+ df.col("e").expr,
+ df.col("f").expr,
+ df.col("g").expr),
+ script = "cat",
+ output = Seq(
+ AttributeReference("a", ArrayType(IntegerType))(),
+ AttributeReference("b", ArrayType(ArrayType(IntegerType)))(),
+ AttributeReference("c", MapType(StringType, IntegerType))(),
+ AttributeReference("d", MapType(StringType,
ArrayType(StringType)))(),
+ AttributeReference("e", StructType(
+ Array(StructField("a", ArrayType(IntegerType)),
+ StructField("b", ArrayType(ArrayType(IntegerType))))))(),
+ AttributeReference("f", StructType(
+ Array(StructField("a", ArrayType(IntegerType)),
+ StructField("d", MapType(StringType,
ArrayType(StringType))))))(),
+ AttributeReference("g", StructType(
+ Array(StructField("col1", StructType(
+ Array(StructField("a", ArrayType(IntegerType)),
+ StructField("b", ArrayType(ArrayType(IntegerType)))))),
+ StructField("col2", StructType(
+ Array(StructField("a", ArrayType(IntegerType)),
+ StructField("d", MapType(StringType,
ArrayType(StringType)))))))))()),
+ child = child,
+ ioschema = defaultIOSchema
+ ),
+ df.select('a, 'b, 'c, 'd, 'e, 'f, 'g).collect())
+ }
+ }
+
+ test("SPARK-31936: Script transform support nested complex type (no serde)")
{
Review comment:
Do we still need this test now? On second thought, keeping only the test
`"SPARK-31936: Script transform support ArrayType/MapType/StructType (no
serde)"` seems sufficient.
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/BaseScriptTransformationExec.scala
##########
@@ -47,7 +47,13 @@ trait BaseScriptTransformationExec extends UnaryExecNode {
def ioschema: ScriptTransformationIOSchema
protected lazy val inputExpressionsWithoutSerde: Seq[Expression] = {
- input.map(Cast(_, StringType).withTimeZone(conf.sessionLocalTimeZone))
+ input.map { in =>
Review comment:
nit format:
```
input.map { in => in.dataType match {
  case _: ArrayType | _: MapType | _: StructType =>
    new StructsToJson(in).withTimeZone(conf.sessionLocalTimeZone)
  case _ => Cast(in, StringType).withTimeZone(conf.sessionLocalTimeZone)
}}
```
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/BaseScriptTransformationExec.scala
##########
@@ -220,6 +226,9 @@ trait BaseScriptTransformationExec extends UnaryExecNode {
case CalendarIntervalType => wrapperConvertException(
data => IntervalUtils.stringToInterval(UTF8String.fromString(data)),
converter)
+ case _: ArrayType | _: MapType | _: StructType =>
+ wrapperConvertException(data => JsonToStructs(attr.dataType,
Map.empty[String, String],
Review comment:
This can cause significant overhead because it creates a new object
(`JsonToStructs`) for each call. Could you avoid it?
##########
File path:
sql/core/src/main/scala/org/apache/spark/sql/execution/BaseScriptTransformationExec.scala
##########
@@ -47,7 +47,13 @@ trait BaseScriptTransformationExec extends UnaryExecNode {
def ioschema: ScriptTransformationIOSchema
protected lazy val inputExpressionsWithoutSerde: Seq[Expression] = {
- input.map(Cast(_, StringType).withTimeZone(conf.sessionLocalTimeZone))
+ input.map { in =>
+ in.dataType match {
+ case _: ArrayType | _: MapType | _: StructType =>
+ new StructsToJson(in).withTimeZone(conf.sessionLocalTimeZone)
Review comment:
Is it okay to follow the default behaviour w/o `options`?
----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]