viirya commented on a change in pull request #32448:
URL: https://github.com/apache/spark/pull/32448#discussion_r650478478
##########
File path:
sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
##########
@@ -150,95 +150,36 @@ class StructTypeSuite extends SparkFunSuite with
SQLHelper {
assert(fromDDL(interval).toDDL === interval)
}
- test("find missing (nested) fields") {
- val schema = StructType.fromDDL("c1 INT, c2 STRUCT<c3: INT, c4: STRUCT<c5:
INT, c6: INT>>")
+ test("SPARK-35290: Struct merging case insensitive") {
+ val schema1 = StructType.fromDDL("a1 INT, a2 STRING, nested STRUCT<b1:
INT, b2: STRING>")
+ val schema2 = StructType.fromDDL("A2 STRING, a3 DOUBLE, nested STRUCT<B2:
STRING, b3: DOUBLE>")
val resolver = SQLConf.get.resolver
- val source1 = StructType.fromDDL("c1 INT")
- val missing1 = StructType.fromDDL("c2 STRUCT<c3: INT, c4: STRUCT<c5: INT,
c6: INT>>")
- assert(StructType.findMissingFields(source1, schema, resolver)
- .exists(_.sameType(missing1)))
+ assert(schema1.merge(schema2, resolver).sameType(StructType.fromDDL(
+ "a1 INT, a2 STRING, nested STRUCT<b1: INT, b2: STRING, b3: DOUBLE>, a3
DOUBLE"
+ )))
- val source2 = StructType.fromDDL("c1 INT, c3 STRING")
- val missing2 = StructType.fromDDL("c2 STRUCT<c3: INT, c4: STRUCT<c5: INT,
c6: INT>>")
- assert(StructType.findMissingFields(source2, schema, resolver)
- .exists(_.sameType(missing2)))
-
- val source3 = StructType.fromDDL("c1 INT, c2 STRUCT<c3: INT>")
- val missing3 = StructType.fromDDL("c2 STRUCT<c4: STRUCT<c5: INT, c6:
INT>>")
- assert(StructType.findMissingFields(source3, schema, resolver)
- .exists(_.sameType(missing3)))
-
- val source4 = StructType.fromDDL("c1 INT, c2 STRUCT<c3: INT, c4:
STRUCT<c6: INT>>")
- val missing4 = StructType.fromDDL("c2 STRUCT<c4: STRUCT<c5: INT>>")
- assert(StructType.findMissingFields(source4, schema, resolver)
- .exists(_.sameType(missing4)))
+ assert(schema2.merge(schema1, resolver).sameType(StructType.fromDDL(
+ "a2 STRING, a3 DOUBLE, nested STRUCT<b2: STRING, b3: DOUBLE, b1: INT>,
a1 INT"
Review comment:
When `schema2` merges `schema1`, don't we keep its original case? E.g.
`"A2 STRING, a3 DOUBLE, nested STRUCT<B2: STRING, b3: DOUBLE, b1: INT>, a1 INT"`
##########
File path:
sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
##########
@@ -150,95 +150,36 @@ class StructTypeSuite extends SparkFunSuite with
SQLHelper {
assert(fromDDL(interval).toDDL === interval)
}
- test("find missing (nested) fields") {
- val schema = StructType.fromDDL("c1 INT, c2 STRUCT<c3: INT, c4: STRUCT<c5:
INT, c6: INT>>")
+ test("SPARK-35290: Struct merging case insensitive") {
+ val schema1 = StructType.fromDDL("a1 INT, a2 STRING, nested STRUCT<b1:
INT, b2: STRING>")
+ val schema2 = StructType.fromDDL("A2 STRING, a3 DOUBLE, nested STRUCT<B2:
STRING, b3: DOUBLE>")
val resolver = SQLConf.get.resolver
- val source1 = StructType.fromDDL("c1 INT")
- val missing1 = StructType.fromDDL("c2 STRUCT<c3: INT, c4: STRUCT<c5: INT,
c6: INT>>")
- assert(StructType.findMissingFields(source1, schema, resolver)
- .exists(_.sameType(missing1)))
+ assert(schema1.merge(schema2, resolver).sameType(StructType.fromDDL(
+ "a1 INT, a2 STRING, nested STRUCT<b1: INT, b2: STRING, b3: DOUBLE>, a3
DOUBLE"
+ )))
- val source2 = StructType.fromDDL("c1 INT, c3 STRING")
- val missing2 = StructType.fromDDL("c2 STRUCT<c3: INT, c4: STRUCT<c5: INT,
c6: INT>>")
- assert(StructType.findMissingFields(source2, schema, resolver)
- .exists(_.sameType(missing2)))
-
- val source3 = StructType.fromDDL("c1 INT, c2 STRUCT<c3: INT>")
- val missing3 = StructType.fromDDL("c2 STRUCT<c4: STRUCT<c5: INT, c6:
INT>>")
- assert(StructType.findMissingFields(source3, schema, resolver)
- .exists(_.sameType(missing3)))
-
- val source4 = StructType.fromDDL("c1 INT, c2 STRUCT<c3: INT, c4:
STRUCT<c6: INT>>")
- val missing4 = StructType.fromDDL("c2 STRUCT<c4: STRUCT<c5: INT>>")
- assert(StructType.findMissingFields(source4, schema, resolver)
- .exists(_.sameType(missing4)))
+ assert(schema2.merge(schema1, resolver).sameType(StructType.fromDDL(
+ "a2 STRING, a3 DOUBLE, nested STRUCT<b2: STRING, b3: DOUBLE, b1: INT>,
a1 INT"
Review comment:
When `schema2` merges `schema1`, don't we keep its original case? E.g.
`"A2 STRING, a3 DOUBLE, nested STRUCT<B2: STRING, b3: DOUBLE, b1: INT>, a1 INT"`
##########
File path:
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveUnion.scala
##########
@@ -181,7 +100,8 @@ object ResolveUnion extends Rule[LogicalPlan] {
// like that. We will sort columns in the struct expression to
make sure two sides of
// union have consistent schema.
aliased += foundAttr
- Alias(addFields(foundAttr, target), foundAttr.name)()
+ val targetType = target.merge(source, conf.resolver)
Review comment:
BTW, `merge` will throw an exception if two schemas conflict. I recall
that a union of conflicting schemas doesn't fail in `ResolveUnion`, but in
`CheckAnalysis`. Could we follow the original behavior?
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]