viirya commented on a change in pull request #29587:
URL: https://github.com/apache/spark/pull/29587#discussion_r505677527



##########
File path: 
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveUnion.scala
##########
@@ -17,29 +17,190 @@
 
 package org.apache.spark.sql.catalyst.analysis
 
+import scala.collection.mutable
+
 import org.apache.spark.sql.AnalysisException
-import org.apache.spark.sql.catalyst.expressions.{Alias, Literal}
+import org.apache.spark.sql.catalyst.expressions._
 import org.apache.spark.sql.catalyst.optimizer.CombineUnions
 import org.apache.spark.sql.catalyst.plans.logical.{LogicalPlan, Project, 
Union}
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.types._
 import org.apache.spark.sql.util.SchemaUtils
+import org.apache.spark.unsafe.types.UTF8String
 
 /**
  * Resolves different children of Union to a common set of columns.
  */
 object ResolveUnion extends Rule[LogicalPlan] {
-  private def unionTwoSides(
+  /**
+   * This method sorts recursively columns in a struct expression based on 
column names.
+   */
+  private def sortStructFields(expr: Expression): Expression = {
+    val existingExprs = 
expr.dataType.asInstanceOf[StructType].fieldNames.zipWithIndex.map {
+      case (name, i) =>
+        val fieldExpr = GetStructField(KnownNotNull(expr), i)
+        if (fieldExpr.dataType.isInstanceOf[StructType]) {
+          (name, sortStructFields(fieldExpr))
+        } else {
+          (name, fieldExpr)
+        }
+    }.sortBy(_._1).flatMap(pair => Seq(Literal(pair._1), pair._2))
+
+    val newExpr = CreateNamedStruct(existingExprs)
+    if (expr.nullable) {
+      If(IsNull(expr), Literal(null, newExpr.dataType), newExpr)
+    } else {
+      newExpr
+    }
+  }
+
+  /**
+   * Assumes input expressions are field expression of `CreateNamedStruct`. 
This method
+   * sorts the expressions based on field names.
+   */
+  private def sortFieldExprs(fieldExprs: Seq[Expression]): Seq[Expression] = {
+    fieldExprs.grouped(2).map { e =>
+      Seq(e.head, e.last)
+    }.toSeq.sortBy { pair =>
+      assert(pair.head.isInstanceOf[Literal])
+      pair.head.eval().asInstanceOf[UTF8String].toString
+    }.flatten
+  }
+
+  /**
+   * This helper method sorts fields in a `UpdateFields` expression by field 
name.
+   */
+  private def sortStructFieldsInWithFields(expr: Expression): Expression = 
expr transformUp {
+    case u: UpdateFields if u.resolved =>
+      u.evalExpr match {
+        case i @ If(IsNull(_), _, CreateNamedStruct(fieldExprs)) =>
+          val sorted = sortFieldExprs(fieldExprs)
+          val newStruct = CreateNamedStruct(sorted)
+          i.copy(trueValue = Literal(null, newStruct.dataType), falseValue = 
newStruct)
+        case CreateNamedStruct(fieldExprs) =>
+          val sorted = sortFieldExprs(fieldExprs)
+          val newStruct = CreateNamedStruct(sorted)
+          newStruct
+        case other =>
+          throw new AnalysisException(s"`UpdateFields` has incorrect eval 
expression: $other. " +
+            "Please file a bug report with this error message, stack trace, 
and the query.")
+      }
+  }
+
+  def simplifyWithFields(expr: Expression): Expression = {
+    expr.transformUp {
+      case UpdateFields(UpdateFields(struct, fieldOps1), fieldOps2) =>
+        UpdateFields(struct, fieldOps1 ++ fieldOps2)
+    }
+  }
+
+  /**
+   * Adds missing fields recursively into given `col` expression, based on the 
target `StructType`.
+   * This is called by `compareAndAddFields` when we find two struct columns 
with same name but
+   * different nested fields. This method will find out the missing nested 
fields from `col` to
+   * `target` struct and add these missing nested fields. Currently we don't 
support finding out
+   * missing nested fields of struct nested in array or struct nested in map.
+   */
+  private def addFields(col: NamedExpression, target: StructType): Expression 
= {
+    assert(col.dataType.isInstanceOf[StructType], "Only support StructType.")
+
+    val resolver = SQLConf.get.resolver
+    val missingFields =
+      StructType.findMissingFields(col.dataType.asInstanceOf[StructType], 
target, resolver)
+
+    // We need to sort columns in result, because we might add another column 
in other side.
+    // E.g., we want to union two structs "a int, b long" and "a int, c 
string".
+    // If we don't sort, we will have "a int, b long, c string" and
+    // "a int, c string, b long", which are not compatible.
+    if (missingFields.isEmpty) {
+      sortStructFields(col)
+    } else {
+      missingFields.map { s =>
+        val struct = addFieldsInto(col, s.fields)
+        // Combines `WithFields`s to reduce expression tree.
+        val reducedStruct = simplifyWithFields(struct)
+        val sorted = sortStructFieldsInWithFields(reducedStruct)
+        sorted
+      }.get
+    }
+  }
+
+  /**
+   * Adds missing fields recursively into given `col` expression. The missing 
fields are given
+   * in `fields`. For example, given `col` as "z struct<z:int, y:int>, x int", 
and `fields` is
+   * "z struct<w:long>, w string". This method will add a nested `z.w` field 
and a top-level
+   * `w` field to `col` and fill null values for them. Note that because we 
might also add missing
+   * fields at other side of Union, we must make sure corresponding attributes 
at two sides have
+   * same field order in structs, so when we adding missing fields, we will 
sort the fields based on
+   * field names. So the data type of returned expression will be
+   * "w string, x int, z struct<w:long, y:int, z:int>".
+   */
+  private def addFieldsInto(
+      col: Expression,
+      fields: Seq[StructField]): Expression = {
+    fields.foldLeft(col) { case (currCol, field) =>
+      field.dataType match {
+        case st: StructType =>
+          val resolver = SQLConf.get.resolver
+          val colField = currCol.dataType.asInstanceOf[StructType]
+            .find(f => resolver(f.name, field.name))
+          if (colField.isEmpty) {
+            // The whole struct is missing. Add a null.
+            UpdateFields(currCol, field.name, Literal(null, st))
+          } else {
+            UpdateFields(currCol, field.name,
+              addFieldsInto(ExtractValue(currCol, Literal(field.name), 
resolver), st.fields))
+          }
+        case dt =>
+          UpdateFields(currCol, field.name, Literal(null, dt))

Review comment:
       Good question. `byName` support actually means we need to adjust the columns 
on the two sides to have a consistent schema. This applies to both top-level and 
nested columns.
   
    So it is actually the same issue as 
https://github.com/apache/spark/pull/29587#discussion_r502837273, i.e., 
adjusting the nested columns to have a more natural schema. As replied in that 
discussion, I plan to do it in a follow-up.




----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

For queries about this service, please contact Infrastructure at:
[email protected]



---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to