maropu commented on a change in pull request #29587:
URL: https://github.com/apache/spark/pull/29587#discussion_r485283227



##########
File path: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveUnion.scala
##########
@@ -50,18 +122,30 @@ object ResolveUnion extends Rule[LogicalPlan] {
       }
     }
 
+    (rightProjectList, aliased)
+  }
+
+  private def unionTwoSides(
+      left: LogicalPlan,
+      right: LogicalPlan,
+      allowMissingCol: Boolean): LogicalPlan = {
+    val rightOutputAttrs = right.output
+
+    // Builds a project list for `right` based on `left` output names
+    val (rightProjectList, aliased) = compareAndAddFields(left, right, 
allowMissingCol)
+
+

Review comment:
       nit: remove this unnecessary blank line.

##########
File path: 
sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
##########
@@ -103,4 +104,80 @@ class StructTypeSuite extends SparkFunSuite {
     val interval = "`a` INTERVAL"
     assert(fromDDL(interval).toDDL === interval)
   }
+
+  test("find missing (nested) fields") {
+    val schema = StructType.fromDDL(
+      "c1 INT, c2 STRUCT<c3: INT, c4: STRUCT<c5: INT, c6: INT>>")
+    val resolver = SQLConf.get.resolver

Review comment:
       Could you add some tests for case-sensitivity?

##########
File path: 
sql/core/src/test/scala/org/apache/spark/sql/DataFrameSetOperationsSuite.scala
##########
@@ -536,4 +536,71 @@ class DataFrameSetOperationsSuite extends QueryTest with 
SharedSparkSession {
       assert(union2.schema.fieldNames === Array("a", "B", "C", "c"))
     }
   }
+
+  test("SPARK-32376: Make unionByName null-filling behavior work with struct 
columns - 1") {

Review comment:
       Could you make the test title clearer? e.g.,
   `- 1` -> `simple tests`
   `- 2` -> `nested cases`

##########
File path: 
sql/core/src/test/scala/org/apache/spark/sql/DataFrameSetOperationsSuite.scala
##########
@@ -536,4 +536,71 @@ class DataFrameSetOperationsSuite extends QueryTest with 
SharedSparkSession {
       assert(union2.schema.fieldNames === Array("a", "B", "C", "c"))
     }
   }
+

Review comment:
       Could you add some tests for case-sensitivity here, too?

##########
File path: 
sql/catalyst/src/test/scala/org/apache/spark/sql/types/StructTypeSuite.scala
##########
@@ -103,4 +104,80 @@ class StructTypeSuite extends SparkFunSuite {
     val interval = "`a` INTERVAL"
     assert(fromDDL(interval).toDDL === interval)
   }
+
+  test("find missing (nested) fields") {
+    val schema = StructType.fromDDL(
+      "c1 INT, c2 STRUCT<c3: INT, c4: STRUCT<c5: INT, c6: INT>>")
+    val resolver = SQLConf.get.resolver
+
+    val source1 = StructType.fromDDL("c1 INT")
+    val missing1 = StructType.fromDDL(
+      "c2 STRUCT<c3: INT, c4: STRUCT<c5: INT, c6: INT>>")
+    assert(StructType.findMissingFields(source1, schema, resolver)
+      .map(_.sameType(missing1)).getOrElse(false))

Review comment:
       nit: could we use `.exists()` instead?

##########
File path: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveUnion.scala
##########
@@ -17,29 +17,101 @@
 
 package org.apache.spark.sql.catalyst.analysis
 
+import scala.collection.mutable
+
 import org.apache.spark.sql.AnalysisException
-import org.apache.spark.sql.catalyst.expressions.{Alias, Literal}
+import org.apache.spark.sql.catalyst.expressions.{Alias, Attribute, 
Expression, Literal, NamedExpression, WithFields}
 import org.apache.spark.sql.catalyst.optimizer.CombineUnions
 import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project, 
Union}
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.types._
 import org.apache.spark.sql.util.SchemaUtils
 
 /**
  * Resolves different children of Union to a common set of columns.
  */
 object ResolveUnion extends Rule[LogicalPlan] {
-  private def unionTwoSides(
+  /**
+   * Adds missing fields recursively into given `col` expression, based on the 
target `StructType`.
+   * For example, given `col` as "a struct<a:int, b:int>, b int" and `target` 
as
+   * "a struct<a:int, b:int, c:long>, b int, c string", this method should add 
`a.c` and `c` to
+   * `col` expression.
+   */
+  private def addFields(col: NamedExpression, target: StructType): 
Option[Expression] = {
+    assert(col.dataType.isInstanceOf[StructType], "Only support StructType.")
+
+    val resolver = SQLConf.get.resolver
+    val missingFields =
+      StructType.findMissingFields(col.dataType.asInstanceOf[StructType], 
target, resolver)
+    if (missingFields.isEmpty) {
+      None
+    } else {
+      missingFields.map(s => addFieldsInto(col, "", s.fields))
+    }
+  }
+
+  /**
+   * Adds missing fields recursively into given `col` expression. The missing 
fields are given
+   * in `fields`. For example, given `col` as "a struct<a:int, b:int>, b int", 
and `fields` is
+   * "a struct<c:long>, c string". This method will add a nested `a.c` field 
and a top-level
+   * `c` field to `col` and fill null values for them.
+   */
+  private def addFieldsInto(col: Expression, base: String, fields: 
Seq[StructField]): Expression = {
+    fields.foldLeft(col) { case (currCol, field) =>
+      field.dataType match {
+        case st: StructType =>
+          val resolver = SQLConf.get.resolver
+          val colField = currCol.dataType.asInstanceOf[StructType]
+            .find(f => resolver(f.name, field.name))
+          if (colField.isEmpty) {
+            // The whole struct is missing. Add a null.
+            WithFields(currCol, s"$base${field.name}", Literal(null, st),
+              sortOutputColumns = true)
+          } else {
+            addFieldsInto(currCol, s"$base${field.name}.", st.fields)
+          }
+        case dt =>
+          // We need to sort columns in result, because we might add another 
column in other side.
+          // E.g., we want to union two structs "a int, b long" and "a int, c 
string".
+          // If we don't sort, we will have "a int, b long, c string" and "a 
int, c string, b long",
+          // which are not compatible.
+          WithFields(currCol, s"$base${field.name}", Literal(null, dt),
+            sortOutputColumns = true)
+      }
+    }
+  }
+
+  private def compareAndAddFields(
       left: LogicalPlan,
       right: LogicalPlan,
-      allowMissingCol: Boolean): LogicalPlan = {
+      allowMissingCol: Boolean): (Seq[NamedExpression], Seq[NamedExpression]) 
= {
     val resolver = SQLConf.get.resolver
     val leftOutputAttrs = left.output
     val rightOutputAttrs = right.output
 
-    // Builds a project list for `right` based on `left` output names
+    val aliased = mutable.ArrayBuffer.empty[Attribute]
+
     val rightProjectList = leftOutputAttrs.map { lattr =>
-      rightOutputAttrs.find { rattr => resolver(lattr.name, rattr.name) 
}.getOrElse {
+      val found = rightOutputAttrs.find { rattr => resolver(lattr.name, 
rattr.name) }
+      if (found.isDefined) {
+        val foundDt = found.get.dataType
+        (foundDt, lattr.dataType) match {
+          case (source: StructType, target: StructType)
+              if allowMissingCol && !source.sameType(target) =>

Review comment:
       To make the logic simpler, could we filter out all the unsupported cases 
(e.g., `nested struct in array`) here? Something like this:
   ```
             case (source: StructType, target: StructType)
                 if allowMissingCol && canMergeSchemas(source, target) =>
   ```




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to