gengliangwang commented on code in PR #36122:
URL: https://github.com/apache/spark/pull/36122#discussion_r859985176
##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveDefaultColumns.scala:
##########
@@ -45,49 +45,62 @@ import org.apache.spark.sql.types._
* (4, 6)
*
 * @param analyzer analyzer to use for processing DEFAULT values stored as text.
- * @param catalog the catalog to use for looking up the schema of INSERT INTO table objects.
*/
-case class ResolveDefaultColumns(
- analyzer: Analyzer,
- catalog: SessionCatalog) extends Rule[LogicalPlan] {
+case class ResolveDefaultColumns(analyzer: Analyzer) extends Rule[LogicalPlan] {
// This field stores the enclosing INSERT INTO command, once we find one.
var enclosingInsert: Option[InsertIntoStatement] = None
// This field stores the schema of the target table of the above command.
var insertTableSchemaWithoutPartitionColumns: Option[StructType] = None
+  // This field records if we've replaced an expression, useful for skipping unneeded copies.
Review Comment:
Is this change related to the alter column change? If not, let's open a
separate PR for it.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]