gengliangwang commented on code in PR #54658:
URL: https://github.com/apache/spark/pull/54658#discussion_r2898152624


##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveSchemaEvolution.scala:
##########
@@ -0,0 +1,263 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.catalyst.analysis
+
+import scala.collection.mutable.ArrayBuffer
+
+import org.apache.spark.internal.Logging
+import org.apache.spark.sql.catalyst.expressions.{Attribute, AttributeMap}
+import org.apache.spark.sql.catalyst.plans.logical._
+import org.apache.spark.sql.catalyst.rules.Rule
+import org.apache.spark.sql.catalyst.trees.TreePattern.COMMAND
+import org.apache.spark.sql.catalyst.types.DataTypeUtils
+import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
+import org.apache.spark.sql.connector.catalog.{CatalogV2Util, SupportsTypeEvolution, Table,
+  TableCatalog, TableChange}
+import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
+import org.apache.spark.sql.connector.catalog.TableChange.UpdateColumnType
+import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors}
+import org.apache.spark.sql.execution.datasources.v2.DataSourceV2Relation
+import org.apache.spark.sql.internal.SQLConf
+import org.apache.spark.sql.types.{ArrayType, AtomicType, DataType, MapType, NullType,
+  StructField, StructType}
+
+
+/**
+ * A rule that resolves schema evolution for MERGE INTO.
+ *
+ * This rule will call the DSV2 Catalog to update the schema of the target table.
+ */
+object ResolveMergeIntoSchemaEvolution extends Rule[LogicalPlan] {
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsWithPruning(
+    _.containsPattern(COMMAND), ruleId) {
+    // This rule should run only if all assignments are resolved, except those
+    // that will be satisfied by schema evolution
+    case m@MergeIntoTable(_, _, _, _, _, _, _) if m.evaluateSchemaEvolution =>
+      val changes = m.changesForSchemaEvolution
+      if (changes.isEmpty) {
+        m
+      } else {
+        val finalAttrMapping = ArrayBuffer.empty[(Attribute, Attribute)]
+        val newTarget = m.targetTable.transform {
+          case r: DataSourceV2Relation =>
+            val referencedSourceSchema = MergeIntoTable.sourceSchemaForSchemaEvolution(m)
+            val newTarget =
+              ResolveSchemaEvolution.performSchemaEvolution(r, referencedSourceSchema, changes)
+            val oldTargetOutput = m.targetTable.output
+            val newTargetOutput = newTarget.output
+            val attributeMapping = oldTargetOutput.zip(newTargetOutput)
+            finalAttrMapping ++= attributeMapping
+            newTarget
+        }
+        val res = m.copy(targetTable = newTarget)
+        res.rewriteAttrs(AttributeMap(finalAttrMapping.toSeq))
+      }
+  }
+}
+
+/**
+ * A rule that resolves schema evolution for V2 INSERT commands.
+ *
+ * This rule will call the DSV2 Catalog to update the schema of the target table.
+ */
+object ResolveInsertSchemaEvolution extends Rule[LogicalPlan] {
+
+  override def apply(plan: LogicalPlan): LogicalPlan = plan.resolveOperatorsWithPruning(
+    _.containsPattern(COMMAND), ruleId) {
+    case v2Write: V2WriteCommand
+        if v2Write.table.resolved && v2Write.query.resolved && v2Write.schemaEvolutionEnabled =>
+      val changes = v2Write.changesForSchemaEvolution
+      if (changes.isEmpty) {
+        v2Write
+      } else {
+        v2Write.table match {
+          case r: DataSourceV2Relation =>
+            val newRelation = ResolveSchemaEvolution.performSchemaEvolution(
+              r, v2Write.query.schema, changes, isByName = v2Write.isByName)
+            val attrMapping: Seq[(Attribute, Attribute)] =
+              r.output.zip(newRelation.output)
+            v2Write.withNewTable(newRelation).rewriteAttrs(AttributeMap(attrMapping))
+          case _ => v2Write
+        }
+      }
+  }
+}
+
+/**
+ * Shared schema evolution utilities used by both MERGE INTO and INSERT schema evolution rules.
+ */
+object ResolveSchemaEvolution extends Logging {
+
+  /**
+   * Applies schema evolution changes to a DSV2 relation by altering the table schema
+   * through the catalog, then verifying all changes were applied.
+   */
+  def performSchemaEvolution(
+      relation: DataSourceV2Relation,
+      referencedSourceSchema: StructType,
+      changes: Array[TableChange],
+      isByName: Boolean = true): DataSourceV2Relation = {
+    (relation.catalog, relation.identifier) match {
+      case (Some(c: TableCatalog), Some(i)) =>
+        c.alterTable(i, changes: _*)
+        val newTable = c.loadTable(i)
+        val newSchema = CatalogV2Util.v2ColumnsToStructType(newTable.columns())
+        // Check if there are any remaining changes not applied.
+        val remainingChanges =
+          schemaChanges(relation.table, newSchema, referencedSourceSchema, isByName = isByName)
+        if (remainingChanges.nonEmpty) {
+          throw QueryCompilationErrors.unsupportedTableChangesInAutoSchemaEvolutionError(
+            remainingChanges, i.toQualifiedNameParts(c))
+        }
+        relation.copy(table = newTable, output = DataTypeUtils.toAttributes(newSchema))
+      case _ => logWarning(s"Schema Evolution enabled but data source $relation " +
+        s"does not support it, skipping.")
+        relation
+    }
+  }
+
+  /**
+   * Computes the set of table changes needed to evolve `originalTarget` schema
+   * to accommodate `originalSource` schema. When `isByName` is true, fields are matched
+   * by name. When false, fields are matched by position.
+   */
+  def schemaChanges(
+      relation: Table,
+      originalTarget: StructType,
+      originalSource: StructType,
+      isByName: Boolean): Array[TableChange] =
+    schemaChanges(relation, originalTarget, originalSource, originalTarget, originalSource,
+      fieldPath = Array(), isByName = isByName)
+
+  private def schemaChanges(
+      table: Table,
+      current: DataType,
+      newType: DataType,
+      originalTarget: StructType,
+      originalSource: StructType,
+      fieldPath: Array[String],
+      isByName: Boolean): Array[TableChange] = {
+    (current, newType) match {
+      case (StructType(currentFields), StructType(newFields)) =>
+        if (isByName) {
+          schemaChangesByName(
+            table, currentFields, newFields, originalTarget, originalSource, fieldPath)
+        } else {
+          schemaChangesByPosition(
+            table, currentFields, newFields, originalTarget, originalSource, fieldPath)
+        }
+
+      case (ArrayType(currentElementType, _), ArrayType(newElementType, _)) =>
+        schemaChanges(table, currentElementType, newElementType,
+          originalTarget, originalSource, fieldPath ++ Seq("element"), 
isByName)
+
+      case (MapType(currentKeyType, currentElementType, _),
+      MapType(updateKeyType, updateElementType, _)) =>
+        schemaChanges(table, currentKeyType, updateKeyType, originalTarget, originalSource,
+          fieldPath ++ Seq("key"), isByName) ++
+          schemaChanges(table, currentElementType, updateElementType,
+            originalTarget, originalSource, fieldPath ++ Seq("value"), 
isByName)
+
+      case (currentType, newType) if currentType == newType =>
+        // No change needed
+        Array.empty[TableChange]
+
+      case (_, NullType) =>
+        // Don't try to change to NullType.
+        Array.empty[TableChange]
+
+      case (_: AtomicType | NullType, newType: AtomicType) =>
+        Array(TableChange.updateColumnType(fieldPath, newType))
+          .filter(canChangeType(table, _))
+
+      case _ =>
+        // Do not support change between atomic and complex types for now
+        throw QueryExecutionErrors.failedToMergeIncompatibleSchemasError(
+          originalTarget, originalSource, null)
+    }
+  }
+
+  /**
+   * Match fields by name: look up each target field in the source by name to collect schema
+   * differences. Nested struct fields are also matched by name.
+   */
+  private def schemaChangesByName(
+      table: Table,
+      currentFields: Array[StructField],
+      newFields: Array[StructField],
+      originalTarget: StructType,
+      originalSource: StructType,
+      fieldPath: Array[String]): Array[TableChange] = {
+    val newFieldMap = toFieldMap(newFields)
+
+    // Update existing field types
+    val updates = currentFields.collect {
+      case currentField: StructField if newFieldMap.contains(currentField.name) =>
+        schemaChanges(table, currentField.dataType, newFieldMap(currentField.name).dataType,
+          originalTarget, originalSource, fieldPath ++ Seq(currentField.name), isByName = true)
+    }.flatten
+
+    // Identify the newly added fields and append to the end
+    val currentFieldMap = toFieldMap(currentFields)
+    val adds = newFields.filterNot(f => currentFieldMap.contains(f.name))
+      // Make the type nullable, since existing rows in the table will have NULLs for this column.
+      .map(f => TableChange.addColumn(fieldPath ++ Set(f.name), f.dataType.asNullable))

Review Comment:
   `fieldPath ++ Set(f.name)` — using `Set` happens to work for a single element, but it signals set semantics that are not wanted here, and `Set` guarantees no iteration order beyond that single element. `Seq(f.name)`, or simply `fieldPath :+ f.name`, would be more idiomatic. Same on line 244.
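   For reference, the `:+` spelling applied to the snippet above (same behavior, but the single-element intent is explicit):
   ```scala
   val adds = newFields.filterNot(f => currentFieldMap.contains(f.name))
     // Make the type nullable, since existing rows in the table will have NULLs for this column.
     .map(f => TableChange.addColumn(fieldPath :+ f.name, f.dataType.asNullable))
   ```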



##########
sql/catalyst/src/main/java/org/apache/spark/sql/connector/catalog/SupportsTypeEvolution.java:
##########
@@ -0,0 +1,34 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.spark.sql.connector.catalog;
+
+import org.apache.spark.annotation.Experimental;
+
+/**
+ * A mix-in interface for {@link Table} type evolution support. Data sources can implement this
+ * interface to indicate they support evolving their schema during write operations to use a new
+ * data type.
+ *
+ * @since 4.2.0
+ */
+@Experimental
+public interface SupportsTypeEvolution extends Table {
+
+  /** Returns whether the given type change can be applied to the table. */
+  boolean canChangeType(TableChange.UpdateColumnType change);

Review Comment:
   The Javadoc says "whether the given type change can be applied" but doesn't 
clarify whether this means the data source physically supports the type 
transition, or whether it's a valid/safe transition. Consider documenting the 
expected semantics — e.g., should this return `true` only for lossless 
widening, or can connectors accept lossy casts too?
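   If only safe transitions are intended, a connector could implement it roughly like this (a sketch, not code from this PR; `currentTypeOf` is a hypothetical helper that resolves the field path against the table's current schema, and `Cast.canUpCast` is Spark's existing check for casts that cannot lose information):
   ```scala
   import org.apache.spark.sql.catalyst.expressions.Cast
   import org.apache.spark.sql.connector.catalog.{SupportsTypeEvolution, TableChange}
   import org.apache.spark.sql.types.DataType

   trait LosslessTypeEvolution extends SupportsTypeEvolution {
     // Hypothetical helper: resolve a (possibly nested) field path to its current type.
     protected def currentTypeOf(fieldNames: Array[String]): Option[DataType]

     // Accept a change only if the current type up-casts losslessly to the new one.
     override def canChangeType(change: TableChange.UpdateColumnType): Boolean =
       currentTypeOf(change.fieldNames()).exists(Cast.canUpCast(_, change.newDataType()))
   }
   ```
   Whichever semantics you choose, pinning them down in the Javadoc keeps connector behavior consistent.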



##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/v2Commands.scala:
##########
@@ -77,6 +77,24 @@ trait V2WriteCommand
     table.skipSchemaResolution || areCompatible(query.output, table.output)
   }
 
+  lazy val schemaEvolutionEnabled: Boolean = withSchemaEvolution && {
+    table match {
+      case r: DataSourceV2Relation if r.autoSchemaEvolution() => true
+      case _ => false
+    }
+  }
+
+  lazy val needSchemaEvolution: Boolean =
+    schemaEvolutionEnabled && changesForSchemaEvolution.nonEmpty
+

Review Comment:
   `changesForSchemaEvolution` calls `ResolveSchemaEvolution.schemaChanges` 
even when `schemaEvolutionEnabled` is false (since `needSchemaEvolution` is the 
only guard). If code paths access `changesForSchemaEvolution` directly, this 
could be wasteful. Consider guarding: `if (!schemaEvolutionEnabled) Array.empty 
else ...`.
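   A minimal sketch of the guard (`computeChanges()` stands in for the current body of the lazy val):
   ```scala
   lazy val changesForSchemaEvolution: Array[TableChange] =
     if (!schemaEvolutionEnabled) Array.empty else computeChanges()
   ```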



##########
sql/catalyst/src/test/scala/org/apache/spark/sql/connector/catalog/InMemoryTableCatalog.scala:
##########
@@ -181,14 +181,24 @@ class BasicInMemoryTableCatalog extends TableCatalog {
 
     table.increaseVersion()
     val currentVersion = table.version()
-    val newTable = new InMemoryTable(
-      name = table.name,
-      columns = CatalogV2Util.structTypeToV2Columns(schema),
-      partitioning = finalPartitioning,
-      properties = properties,
-      constraints = constraints,
-      id = table.id)
-      .alterTableWithData(table.data, schema)
+    val newTable = table match {
+      case _: InMemoryTable =>
+        new InMemoryTable(
+          name = table.name,
+          columns = CatalogV2Util.structTypeToV2Columns(schema),
+          partitioning = finalPartitioning,
+          properties = properties,
+          constraints = constraints,
+          id = table.id)
+          .alterTableWithData(table.data, schema)

Review Comment:
   This pattern match on `InMemoryTable` vs `InMemoryTableWithV2Filter` has no 
default case. If another `InMemoryBaseTable` subclass is introduced, this will 
throw a `MatchError` at runtime. A `case _ =>` with a clear error message would 
be safer.
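   For example, a fallback case appended to the match (sketch):
   ```scala
   case other =>
     throw new IllegalStateException(
       s"alterTable does not support table implementation: ${other.getClass.getName}")
   ```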



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
