cloud-fan commented on code in PR #42393:
URL: https://github.com/apache/spark/pull/42393#discussion_r1305411589
##########
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/TableOutputResolver.scala:
##########
@@ -88,37 +86,24 @@ object TableOutputResolver {
if (actualExpectedCols.size < query.output.size) {
throw QueryCompilationErrors.cannotWriteTooManyColumnsToTableError(
tableName, actualExpectedCols.map(_.name), query)
+ } else if (actualExpectedCols.size > query.output.size && !byName) {
+ throw QueryCompilationErrors.cannotWriteNotEnoughColumnsToTableError(
+ tableName, actualExpectedCols.map(_.name), query)
}
val errors = new mutable.ArrayBuffer[String]()
val resolved: Seq[NamedExpression] = if (byName) {
// If a top-level column does not have a corresponding value in the input query, fill with
-      // the column's default value. We need to pass `fillDefaultValue` as true here, if the
-      // `supportColDefaultValue` parameter is also true.
+      // the column's default value.
reorderColumnsByName(
tableName,
query.output,
actualExpectedCols,
conf,
errors += _,
- fillDefaultValue = supportColDefaultValue)
Review Comment:
Why do we change this? It's on purpose that we don't enable default column
values for v2 for now.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]