This is an automated email from the ASF dual-hosted git repository.
wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git
The following commit(s) were added to refs/heads/master by this push:
new 889c04879fc2 [SPARK-55825][SQL] Rename `_LEGACY_ERROR_TEMP_1309` to
`PARTITION_BY_NOT_ALLOWED_WITH_INSERT_INTO`
889c04879fc2 is described below
commit 889c04879fc28abed047b1ba1c98f7f2130e4260
Author: ilicmarkodb <[email protected]>
AuthorDate: Thu Mar 5 00:16:12 2026 +0800
[SPARK-55825][SQL] Rename `_LEGACY_ERROR_TEMP_1309` to
`PARTITION_BY_NOT_ALLOWED_WITH_INSERT_INTO`
### What changes were proposed in this pull request?
Rename the legacy error class `_LEGACY_ERROR_TEMP_1309` to
`PARTITION_BY_NOT_ALLOWED_WITH_INSERT_INTO` and add SQL state `42601`.
This error is thrown when a user calls `insertInto()` together with
`partitionBy()` on a `DataFrameWriter`, which is an invalid API usage since the
target table's partition columns are already defined.
### Why are the changes needed?
To improve error messaging: the legacy temporary error class is replaced with a
descriptive, user-facing error condition name and a standard SQL state.
### Does this PR introduce _any_ user-facing change?
Yes. The error class name changes from `_LEGACY_ERROR_TEMP_1309` to
`PARTITION_BY_NOT_ALLOWED_WITH_INSERT_INTO`, and the SQL state `42601` is now
included. The error message text has also been reworded and now includes the
target table name via the new `<tableName>` message parameter.
### How was this patch tested?
Existing test in `InsertSuite.scala` already covers this error and has been
updated with the new error class name.
### Was this patch authored or co-authored using generative AI tooling?
Yes, co-authored with Claude Code.
Closes #54609 from ilicmarkodb/rename_LEGACY_ERROR_TEMP_1309.
Authored-by: ilicmarkodb <[email protected]>
Signed-off-by: Wenchen Fan <[email protected]>
---
common/utils/src/main/resources/error/error-conditions.json | 11 ++++++-----
.../org/apache/spark/sql/errors/QueryCompilationErrors.scala | 6 +++---
.../scala/org/apache/spark/sql/classic/DataFrameWriter.scala | 2 +-
.../test/scala/org/apache/spark/sql/hive/InsertSuite.scala | 4 ++--
4 files changed, 12 insertions(+), 11 deletions(-)
diff --git a/common/utils/src/main/resources/error/error-conditions.json
b/common/utils/src/main/resources/error/error-conditions.json
index e8a297403f27..3e99d14baeae 100644
--- a/common/utils/src/main/resources/error/error-conditions.json
+++ b/common/utils/src/main/resources/error/error-conditions.json
@@ -5150,6 +5150,12 @@
],
"sqlState" : "428FT"
},
+ "PARTITION_BY_NOT_ALLOWED_WITH_INSERT_INTO" : {
+ "message" : [
+ "partitionBy() cannot be used with insertInto(). Partition columns for
table <tableName> are determined by the table definition."
+ ],
+ "sqlState" : "42601"
+ },
"PARTITION_COLUMN_NOT_FOUND_IN_SCHEMA" : {
"message" : [
"Partition column <column> not found in schema <schema>. Please provide
the existing column for partitioning."
@@ -8847,11 +8853,6 @@
"There is a 'path' option set and save() is called with a path
parameter. Either remove the path option, or call save() without the parameter.
To ignore this check, set '<config>' to 'true'."
]
},
- "_LEGACY_ERROR_TEMP_1309" : {
- "message" : [
- "insertInto() can't be used together with partitionBy(). Partition
columns have already been defined for the table. It is not necessary to use
partitionBy()."
- ]
- },
"_LEGACY_ERROR_TEMP_1310" : {
"message" : [
"Couldn't find a catalog to handle the identifier <quote>."
diff --git
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index e13a3ab3aad9..494f0b07629b 100644
---
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -3559,10 +3559,10 @@ private[sql] object QueryCompilationErrors extends
QueryErrorsBase with Compilat
"createMode" -> toDSOption(createMode)))
}
- def partitionByDoesNotAllowedWhenUsingInsertIntoError(): Throwable = {
+ def partitionByDoesNotAllowedWhenUsingInsertIntoError(tableName: String):
Throwable = {
new AnalysisException(
- errorClass = "_LEGACY_ERROR_TEMP_1309",
- messageParameters = Map.empty)
+ errorClass = "PARTITION_BY_NOT_ALLOWED_WITH_INSERT_INTO",
+ messageParameters = Map("tableName" -> tableName))
}
def cannotFindCatalogToHandleIdentifierError(quote: String): Throwable = {
diff --git
a/sql/core/src/main/scala/org/apache/spark/sql/classic/DataFrameWriter.scala
b/sql/core/src/main/scala/org/apache/spark/sql/classic/DataFrameWriter.scala
index a29fcc3d1eca..079e92451021 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/classic/DataFrameWriter.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/classic/DataFrameWriter.scala
@@ -314,7 +314,7 @@ final class DataFrameWriter[T] private[sql](ds: Dataset[T])
extends sql.DataFram
assertNotBucketed("insertInto")
if (partitioningColumns.isDefined) {
- throw
QueryCompilationErrors.partitionByDoesNotAllowedWhenUsingInsertIntoError()
+ throw
QueryCompilationErrors.partitionByDoesNotAllowedWhenUsingInsertIntoError(tableName)
}
val session = df.sparkSession
diff --git
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala
index 2e45307d9102..3dc6d7e9097c 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/InsertSuite.scala
@@ -351,8 +351,8 @@ class InsertSuite extends QueryTest with TestHiveSingleton
with BeforeAndAfter
exception = intercept[AnalysisException] {
Seq((1, 2, 3, 4)).toDF("a", "b", "c", "d").write.partitionBy("b",
"c").insertInto(tableName)
},
- condition = "_LEGACY_ERROR_TEMP_1309",
- parameters = Map.empty
+ condition = "PARTITION_BY_NOT_ALLOWED_WITH_INSERT_INTO",
+ parameters = Map("tableName" -> tableName)
)
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]