gengliangwang commented on code in PR #54658:
URL: https://github.com/apache/spark/pull/54658#discussion_r2898152619


##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSchemaEvolution.scala:
##########
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.catalyst.analysis
+
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap}
+import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.trees.TreePattern.COMMAND
+import org.apache.spark.sql.catalyst.types.DataTypeUtils
+import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
+import org.apache.spark.sql.connector.catalog.{CatalogV2Util, SupportsTypeEvolution, Table, TableCatalog, TableChange}
+import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
+import org.apache.spark.sql.connector.catalog.TableChange.UpdateColumnType
+import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors}
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.types.{ArrayType, AtomicType, DataType, MapType, NullType, StructField, StructType}
+
+
+/**
+ * A rule that resolves schema evolution for MERGE INTO.
+ *
+ * This rule will call the DSV2 Catalog to update the schema of the target table.
+ */
+object ResolveMergeIntoSchemaEvolution extends Rule[LogicalPlan] {
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsWithPruning(
+    _.containsPattern(COMMAND), ruleId) {
+    // This rule should run only if all assignments are resolved, except those
+    // that will be satisfied by schema evolution
+    case m@MergeIntoTable(_, _, _, _, _, _, _) if m.evaluateSchemaEvolution =>
+      val changes = m.changesForSchemaEvolution
+      if (changes.isEmpty) {
+        m
+      } else {
+        val finalAttrMapping = ArrayBuffer.empty[(Attribute, Attribute)]
+        val newTarget = m.targetTable.transform {
+          case r: DataSourceV2Relation =>
+            val referencedSourceSchema = MergeIntoTable.sourceSchemaForSchemaEvolution(m)
+            val newTarget =
+              ResolveSchemaEvolution.performSchemaEvolution(r, referencedSourceSchema, changes)
+            val oldTargetOutput = m.targetTable.output
+            val newTargetOutput = newTarget.output
+            val attributeMapping = oldTargetOutput.zip(newTargetOutput)
+            finalAttrMapping ++= attributeMapping
+            newTarget
+        }
+        val res = m.copy(targetTable = newTarget)
+        res.rewriteAttrs(AttributeMap(finalAttrMapping.toSeq))
+      }
+  }
+}
+
+/**
+ * A rule that resolves schema evolution for V2 INSERT commands.
+ *
+ * This rule will call the DSV2 Catalog to update the schema of the target table.
+ */
+object ResolveInsertSchemaEvolution extends Rule[LogicalPlan] {
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsWithPruning(
+    _.containsPattern(COMMAND), ruleId) {
+    case v2Write: V2WriteCommand
+        if v2Write.table.resolved && v2Write.query.resolved && v2Write.schemaEvolutionEnabled =>
+      val changes = v2Write.changesForSchemaEvolution
+      if (changes.isEmpty) {
+        v2Write
+      } else {
+        v2Write.table match {
+          case r: DataSourceV2Relation =>
+            val newRelation = ResolveSchemaEvolution.performSchemaEvolution(
+              r, v2Write.query.schema, changes, isByName = v2Write.isByName)
+            val attrMapping: Seq[(Attribute, Attribute)] =
+              r.output.zip(newRelation.output)
+            v2Write.withNewTable(newRelation).rewriteAttrs(AttributeMap(attrMapping))
+          case _ => v2Write
+        }
+      }
+  }
+}
+
+/**
+ * Shared schema evolution utilities used by both MERGE INTO and INSERT schema evolution rules.
+ */
+object ResolveSchemaEvolution extends Logging {
+
+  /**
+   * Applies schema evolution changes to a DSV2 relation by altering the table schema
+   * through the catalog, then verifying all changes were applied.
+   */
+  def performSchemaEvolution(
+      relation: DataSourceV2Relation,
+      referencedSourceSchema: StructType,
+      changes: Array[TableChange],
+      isByName: Boolean = true): DataSourceV2Relation = {
+    (relation.catalog, relation.identifier) match {
+      case (Some(c: TableCatalog), Some(i)) =>
+        c.alterTable(i, changes: _*)
+        val newTable = c.loadTable(i)
+        val newSchema = CatalogV2Util.v2ColumnsToStructType(newTable.columns())
+        // Check if there are any remaining changes not applied.
+        val remainingChanges =
+          schemaChanges(relation.table, newSchema, referencedSourceSchema, isByName = isByName)
+        if (remainingChanges.nonEmpty) {
+          throw QueryCompilationErrors.unsupportedTableChangesInAutoSchemaEvolutionError(
+            remainingChanges, i.toQualifiedNameParts(c))
+        }
+        relation.copy(table = newTable, output = DataTypeUtils.toAttributes(newSchema))
+      case _ => logWarning(s"Schema Evolution enabled but data source $relation " +
+        s"does not support it, skipping.")
+        relation
+    }
+  }
+
+  /**
+   * Computes the set of table changes needed to evolve `originalTarget` schema
+   * to accommodate `originalSource` schema. When `isByName` is true, fields are matched
+   * by name. When false, fields are matched by position.
+   */
+  def schemaChanges(
+      relation: Table,
+      originalTarget: StructType,
+      originalSource: StructType,
+      isByName: Boolean): Array[TableChange] =
+    schemaChanges(relation, originalTarget, originalSource, originalTarget, originalSource,
+      fieldPath = Array(), isByName = isByName)
+
+  private def schemaChanges(
+      table: Table,
+      current: DataType,
+      newType: DataType,
+      originalTarget: StructType,
+      originalSource: StructType,
+      fieldPath: Array[String],
+      isByName: Boolean): Array[TableChange] = {
+    (current, newType) match {
+      case (StructType(currentFields), StructType(newFields)) =>
+        if (isByName) {
+          schemaChangesByName(
+            table, currentFields, newFields, originalTarget, originalSource, fieldPath)
+        } else {
+          schemaChangesByPosition(
+            table, currentFields, newFields, originalTarget, originalSource, fieldPath)
+        }
+
+      case (ArrayType(currentElementType, _), ArrayType(newElementType, _)) =>
+        schemaChanges(table, currentElementType, newElementType,
+          originalTarget, originalSource, fieldPath ++ Seq("element"), 
isByName)
+
+      case (MapType(currentKeyType, currentElementType, _),
+      MapType(updateKeyType, updateElementType, _)) =>
+        schemaChanges(table, currentKeyType, updateKeyType, originalTarget, originalSource,
+          fieldPath ++ Seq("key"), isByName) ++
+          schemaChanges(table, currentElementType, updateElementType,
+            originalTarget, originalSource, fieldPath ++ Seq("value"), 
isByName)
+
+      case (currentType, newType) if currentType == newType =>
+        // No change needed
+        Array.empty[TableChange]
+
+      case (_, NullType) =>
+        // Don't try to change to NullType.
+        Array.empty[TableChange]
+
+      case (_: AtomicType | NullType, newType: AtomicType) =>
+        Array(TableChange.updateColumnType(fieldPath, newType))
+          .filter(canChangeType(table, _))

Review Comment:
   Silently filtering out type changes the connector doesn't support via `.filter(canChangeType(...))` could be confusing: the user requests schema evolution, but the type change quietly disappears, and the write may then fail with a type mismatch error downstream. Consider logging a warning when a type change is skipped, e.g. `logWarning(s"Skipping unsupported type change: $change")`.
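   
   A minimal sketch of what I have in mind, reusing the names from this hunk (`canChangeType`, `fieldPath`, and `table` are the ones defined above; treat this as an illustration, not a drop-in patch):
   
   ```scala
   case (_: AtomicType | NullType, newType: AtomicType) =>
     val change = TableChange.updateColumnType(fieldPath, newType)
     if (canChangeType(table, change)) {
       Array(change)
     } else {
       // Surface the skipped evolution instead of dropping it silently.
       logWarning(s"Skipping unsupported type change: ${fieldPath.mkString(".")} -> $newType")
       Array.empty[TableChange]
     }
   ```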



##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSchemaEvolution.scala:
##########
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.catalyst.analysis
+
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap}
+import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.trees.TreePattern.COMMAND
+import org.apache.spark.sql.catalyst.types.DataTypeUtils
+import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
+import org.apache.spark.sql.connector.catalog.{CatalogV2Util, SupportsTypeEvolution, Table, TableCatalog, TableChange}
+import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
+import org.apache.spark.sql.connector.catalog.TableChange.UpdateColumnType
+import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors}
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.types.{ArrayType, AtomicType, DataType, MapType, NullType, StructField, StructType}
+
+
+/**
+ * A rule that resolves schema evolution for MERGE INTO.
+ *
+ * This rule will call the DSV2 Catalog to update the schema of the target table.
+ */
+object ResolveMergeIntoSchemaEvolution extends Rule[LogicalPlan] {
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsWithPruning(
+    _.containsPattern(COMMAND), ruleId) {
+    // This rule should run only if all assignments are resolved, except those
+    // that will be satisfied by schema evolution
+    case m@MergeIntoTable(_, _, _, _, _, _, _) if m.evaluateSchemaEvolution =>
+      val changes = m.changesForSchemaEvolution
+      if (changes.isEmpty) {
+        m
+      } else {
+        val finalAttrMapping = ArrayBuffer.empty[(Attribute, Attribute)]
+        val newTarget = m.targetTable.transform {
+          case r: DataSourceV2Relation =>
+            val referencedSourceSchema = MergeIntoTable.sourceSchemaForSchemaEvolution(m)
+            val newTarget =
+              ResolveSchemaEvolution.performSchemaEvolution(r, referencedSourceSchema, changes)
+            val oldTargetOutput = m.targetTable.output
+            val newTargetOutput = newTarget.output
+            val attributeMapping = oldTargetOutput.zip(newTargetOutput)
+            finalAttrMapping ++= attributeMapping
+            newTarget
+        }
+        val res = m.copy(targetTable = newTarget)
+        res.rewriteAttrs(AttributeMap(finalAttrMapping.toSeq))
+      }
+  }
+}
+
+/**
+ * A rule that resolves schema evolution for V2 INSERT commands.
+ *
+ * This rule will call the DSV2 Catalog to update the schema of the target table.
+ */
+object ResolveInsertSchemaEvolution extends Rule[LogicalPlan] {
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsWithPruning(
+    _.containsPattern(COMMAND), ruleId) {
+    case v2Write: V2WriteCommand
+        if v2Write.table.resolved && v2Write.query.resolved && v2Write.schemaEvolutionEnabled =>
+      val changes = v2Write.changesForSchemaEvolution
+      if (changes.isEmpty) {
+        v2Write
+      } else {
+        v2Write.table match {
+          case r: DataSourceV2Relation =>
+            val newRelation = ResolveSchemaEvolution.performSchemaEvolution(
+              r, v2Write.query.schema, changes, isByName = v2Write.isByName)
+            val attrMapping: Seq[(Attribute, Attribute)] =
+              r.output.zip(newRelation.output)
+            v2Write.withNewTable(newRelation).rewriteAttrs(AttributeMap(attrMapping))
+          case _ => v2Write
+        }
+      }
+  }
+}
+
+/**
+ * Shared schema evolution utilities used by both MERGE INTO and INSERT schema evolution rules.
+ */
+object ResolveSchemaEvolution extends Logging {
+
+  /**
+   * Applies schema evolution changes to a DSV2 relation by altering the table schema
+   * through the catalog, then verifying all changes were applied.
+   */
+  def performSchemaEvolution(
+      relation: DataSourceV2Relation,
+      referencedSourceSchema: StructType,
+      changes: Array[TableChange],
+      isByName: Boolean = true): DataSourceV2Relation = {
+    (relation.catalog, relation.identifier) match {
+      case (Some(c: TableCatalog), Some(i)) =>
+        c.alterTable(i, changes: _*)
+        val newTable = c.loadTable(i)
+        val newSchema = CatalogV2Util.v2ColumnsToStructType(newTable.columns())
+        // Check if there are any remaining changes not applied.
+        val remainingChanges =
+          schemaChanges(relation.table, newSchema, referencedSourceSchema, isByName = isByName)
+        if (remainingChanges.nonEmpty) {
+          throw QueryCompilationErrors.unsupportedTableChangesInAutoSchemaEvolutionError(
+            remainingChanges, i.toQualifiedNameParts(c))
+        }
+        relation.copy(table = newTable, output = DataTypeUtils.toAttributes(newSchema))
+      case _ => logWarning(s"Schema Evolution enabled but data source $relation " +
+        s"does not support it, skipping.")
+        relation
+    }
+  }
+
+  /**
+   * Computes the set of table changes needed to evolve `originalTarget` schema
+   * to accommodate `originalSource` schema. When `isByName` is true, fields are matched
+   * by name. When false, fields are matched by position.
+   */
+  def schemaChanges(
+      relation: Table,
+      originalTarget: StructType,
+      originalSource: StructType,
+      isByName: Boolean): Array[TableChange] =
+    schemaChanges(relation, originalTarget, originalSource, originalTarget, originalSource,
+      fieldPath = Array(), isByName = isByName)
+
+  private def schemaChanges(
+      table: Table,
+      current: DataType,
+      newType: DataType,
+      originalTarget: StructType,
+      originalSource: StructType,
+      fieldPath: Array[String],
+      isByName: Boolean): Array[TableChange] = {
+    (current, newType) match {
+      case (StructType(currentFields), StructType(newFields)) =>
+        if (isByName) {
+          schemaChangesByName(
+            table, currentFields, newFields, originalTarget, originalSource, fieldPath)
+        } else {
+          schemaChangesByPosition(
+            table, currentFields, newFields, originalTarget, originalSource, fieldPath)
+        }
+
+      case (ArrayType(currentElementType, _), ArrayType(newElementType, _)) =>
+        schemaChanges(table, currentElementType, newElementType,
+          originalTarget, originalSource, fieldPath ++ Seq("element"), 
isByName)

Review Comment:
   When recursing into `ArrayType` elements, `isByName` is passed through unchanged. An array has a single element type, so there is nothing to match by name at the array level itself; the flag only takes effect once the recursion reaches a struct nested inside the element. Is it intentional that struct fields inside array elements are matched by name for by-name writes but by position otherwise? The two paths could produce different evolution results for the same source and target schemas.
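   
   For instance, if struct fields inside array elements should always evolve positionally, the recursion could pin the flag explicitly (just a sketch against the signature above, not a concrete request):
   
   ```scala
   case (ArrayType(currentElementType, _), ArrayType(newElementType, _)) =>
     // Hypothetical: force positional matching for struct fields nested in
     // array elements, independent of how the top-level write is resolved.
     schemaChanges(table, currentElementType, newElementType,
       originalTarget, originalSource, fieldPath ++ Seq("element"), isByName = false)
   ```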



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


