This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 94031ead786 [SPARK-44004][SQL] Assign name & improve error message for frequent LEGACY errors
94031ead786 is described below

commit 94031ead78682bd5c1adab8b87e61055968c8998
Author: itholic <haejoon....@databricks.com>
AuthorDate: Wed Jun 21 10:36:04 2023 +0300

    [SPARK-44004][SQL] Assign name & improve error message for frequent LEGACY errors
    
    ### What changes were proposed in this pull request?
    
    This PR proposes to assign proper names to the most frequently occurring LEGACY error classes and to improve their error messages:

    - `_LEGACY_ERROR_TEMP_0023` -> `INVALID_NUMERIC_LITERAL_RANGE`
    - `_LEGACY_ERROR_TEMP_1029` -> `UDTF_ALIAS_NUMBER_MISMATCH`
    - `_LEGACY_ERROR_TEMP_1051` -> `CANNOT_RESOLVE_STAR_EXPAND`
    - `_LEGACY_ERROR_TEMP_1157` -> `UNSUPPORTED_DATA_SOURCE_FOR_DIRECT_QUERY`
    - `_LEGACY_ERROR_TEMP_1318` -> `CANNOT_PARSE_INTERVAL`
    - `_LEGACY_ERROR_TEMP_1333` -> `INVALID_VIEW_TEXT`
    - `_LEGACY_ERROR_TEMP_2308` -> `UNRESOLVABLE_TABLE_VALUED_FUNCTION`
    - `_LEGACY_ERROR_TEMP_2331` -> `FAILED_SQL_EXPRESSION_EVALUATION`
    
    ### Why are the changes needed?
    
    To improve the errors that occur most frequently.
    
    ### Does this PR introduce _any_ user-facing change?
    
    No API changes; only the error class names and message texts shown to users change.
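
    For example, for an out-of-range numeric literal (a sketch assembled from the error templates and golden files in this diff; the bracketed error-class prefix and the "tinyint" rendering are assumptions about the exact console output):

        SELECT 128Y

    Before:

        [_LEGACY_ERROR_TEMP_0023] Numeric literal 128 does not fit in range [-128, 127] for type tinyint.

    After:

        [INVALID_NUMERIC_LITERAL_RANGE] Numeric literal 128 is outside the valid range for tinyint with minimum value of -128 and maximum value of 127. Please adjust the value accordingly.

    Identifiers in message parameters are now quoted via toSQLId. For SELECT t1.x.y.* FROM t1 (same caveats as above):

    Before:

        [_LEGACY_ERROR_TEMP_1051] Cannot resolve 't1.x.y.*' given input columns 'i1'.

    After:

        [CANNOT_RESOLVE_STAR_EXPAND] Cannot resolve `t1`.`x`.`y`.* given input columns `i1`. Please check that the specified table or struct exists and is accessible in the input columns.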
    
    ### How was this patch tested?
    
    The existing CI should pass.
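
    For a local check, the touched suites can be run directly; the commands below are the standard Spark dev workflow (illustrative, not part of this patch), where SPARK_GENERATE_GOLDEN_FILES=1 regenerates the updated .sql.out golden files:

        build/sbt "catalyst/testOnly org.apache.spark.sql.catalyst.parser.ExpressionParserSuite"
        SPARK_GENERATE_GOLDEN_FILES=1 build/sbt "sql/testOnly org.apache.spark.sql.SQLQueryTestSuite"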
    
    Closes #41504 from itholic/naming_top_error_class.
    
    Authored-by: itholic <haejoon....@databricks.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 core/src/main/resources/error/error-classes.json   | 80 +++++++++++-----------
 .../spark/sql/catalyst/analysis/Analyzer.scala     |  4 +-
 .../catalyst/analysis/ResolveInlineTables.scala    |  5 +-
 .../spark/sql/catalyst/analysis/unresolved.scala   |  3 +-
 .../spark/sql/errors/QueryCompilationErrors.scala  | 22 +++---
 .../spark/sql/errors/QueryParsingErrors.scala      |  2 +-
 .../sql/catalyst/analysis/AnalysisErrorSuite.scala |  5 +-
 .../catalyst/analysis/ResolveSubquerySuite.scala   | 11 ++-
 .../catalyst/parser/ExpressionParserSuite.scala    | 10 +--
 .../analyzer-results/ansi/literals.sql.out         | 10 +--
 .../columnresolution-negative.sql.out              |  6 +-
 .../analyzer-results/join-lateral.sql.out          |  6 +-
 .../sql-tests/analyzer-results/literals.sql.out    | 10 +--
 .../analyzer-results/postgreSQL/boolean.sql.out    |  5 +-
 .../postgreSQL/window_part3.sql.out                |  5 +-
 .../postgreSQL/window_part4.sql.out                |  5 +-
 .../table-valued-functions.sql.out                 |  4 +-
 .../sql-tests/results/ansi/literals.sql.out        | 10 +--
 .../results/columnresolution-negative.sql.out      |  6 +-
 .../sql-tests/results/join-lateral.sql.out         |  6 +-
 .../resources/sql-tests/results/literals.sql.out   | 10 +--
 .../sql-tests/results/postgreSQL/boolean.sql.out   |  5 +-
 .../results/postgreSQL/window_part3.sql.out        |  5 +-
 .../results/postgreSQL/window_part4.sql.out        |  5 +-
 .../results/table-valued-functions.sql.out         |  4 +-
 .../scala/org/apache/spark/sql/SQLQuerySuite.scala | 12 ++--
 .../spark/sql/connector/DataSourceV2SQLSuite.scala |  6 +-
 .../spark/sql/execution/SQLViewTestSuite.scala     |  4 +-
 28 files changed, 134 insertions(+), 132 deletions(-)

diff --git a/core/src/main/resources/error/error-classes.json b/core/src/main/resources/error/error-classes.json
index d9e729effeb..e35adcfbb5a 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -157,6 +157,11 @@
     ],
     "sqlState" : "22018"
   },
+  "CANNOT_PARSE_INTERVAL" : {
+    "message" : [
+      "Unable to parse <intervalString>. Please ensure that the value provided is in a valid format for defining an interval. You can reference the documentation for the correct format. If the issue persists, please double check that the input value is not null or empty and try again."
+    ]
+  },
   "CANNOT_PARSE_JSON_FIELD" : {
     "message" : [
       "Cannot parse the field name <fieldName> and the value <fieldValue> of the JSON token type <jsonType> to target Spark data type <dataType>."
@@ -191,6 +196,11 @@
     ],
     "sqlState" : "0AKD0"
   },
+  "CANNOT_RESOLVE_STAR_EXPAND" : {
+    "message" : [
+      "Cannot resolve <targetString>.* given input columns <columns>. Please check that the specified table or struct exists and is accessible in the input columns."
+    ]
+  },
   "CANNOT_RESTORE_PERMISSIONS_FOR_PATH" : {
     "message" : [
       "Failed to set permissions on created path <path> back to <permission>."
@@ -689,6 +699,11 @@
     ],
     "sqlState" : "42K04"
   },
+  "FAILED_SQL_EXPRESSION_EVALUATION" : {
+    "message" : [
+      "Failed to evaluate the SQL expression: <sqlExpr>. Please check your syntax and ensure all required tables and columns are available."
+    ]
+  },
   "FIELD_NOT_FOUND" : {
     "message" : [
       "No such struct field <fieldName> in <fields>."
@@ -1222,6 +1237,11 @@
       }
     }
   },
+  "INVALID_NUMERIC_LITERAL_RANGE" : {
+    "message" : [
+      "Numeric literal <rawStrippedQualifier> is outside the valid range for <typeName> with minimum value of <minValue> and maximum value of <maxValue>. Please adjust the value accordingly."
+    ]
+  },
   "INVALID_OPTIONS" : {
     "message" : [
       "Invalid options:"
@@ -1497,6 +1517,11 @@
       "The url is invalid: <url>. If necessary set <ansiConfig> to \"false\" to bypass this error."
     ]
   },
+  "INVALID_VIEW_TEXT" : {
+    "message" : [
+      "The view <viewName> cannot be displayed due to invalid view text: <viewText>. This may be caused by an unauthorized modification of the view or an incorrect query syntax. Please check your query syntax and verify that the view has not been tampered with."
+    ]
+  },
   "INVALID_WHERE_CONDITION" : {
     "message" : [
       "The WHERE condition <condition> contains invalid expressions: <expressionList>.",
@@ -2061,6 +2086,11 @@
     ],
     "sqlState" : "54000"
   },
+  "UDTF_ALIAS_NUMBER_MISMATCH" : {
+    "message" : [
+      "The number of aliases supplied in the AS clause does not match the number of columns output by the UDTF. Expected <aliasesSize> aliases, but got <aliasesNames>. Please ensure that the number of aliases provided matches the number of columns output by the UDTF."
+    ]
+  },
   "UNABLE_TO_ACQUIRE_MEMORY" : {
     "message" : [
       "Unable to acquire <requestedBytes> bytes of memory, got <receivedBytes>."
@@ -2131,6 +2161,11 @@
     ],
     "sqlState" : "42704"
   },
+  "UNRESOLVABLE_TABLE_VALUED_FUNCTION" : {
+    "message" : [
+      "Could not resolve <name> to a table-valued function. Please make sure that <name> is defined as a table-valued function and that all required parameters are provided correctly. If <name> is not defined, please create the table-valued function before using it. For more information about defining table-valued functions, please refer to the Apache Spark documentation."
+    ]
+  },
   "UNRESOLVED_ALL_IN_GROUP_BY" : {
     "message" : [
       "Cannot infer grouping columns for GROUP BY ALL based on the select clause. Please explicitly specify the grouping columns."
@@ -2225,6 +2260,11 @@
     ],
     "sqlState" : "0A000"
   },
+  "UNSUPPORTED_DATA_SOURCE_FOR_DIRECT_QUERY" : {
+    "message" : [
+      "The direct query on files does not support the data source type: <className>. Please try a different data source type or consider using a different query method."
+    ]
+  },
   "UNSUPPORTED_DEFAULT_VALUE" : {
     "message" : [
       "DEFAULT column values is not supported."
@@ -2724,11 +2764,6 @@
       "Function trim doesn't support with type <trimOption>. Please use BOTH, LEADING or TRAILING as trim type."
     ]
   },
-  "_LEGACY_ERROR_TEMP_0023" : {
-    "message" : [
-      "Numeric literal <rawStrippedQualifier> does not fit in range [<minValue>, <maxValue>] for type <typeName>."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_0024" : {
     "message" : [
       "Can only have a single from-to unit in the interval literal syntax."
@@ -3005,11 +3040,6 @@
       "Number of column aliases does not match number of columns. Number of column aliases: <columnSize>; number of columns: <outputSize>."
     ]
   },
-  "_LEGACY_ERROR_TEMP_1029" : {
-    "message" : [
-      "The number of aliases supplied in the AS clause does not match the number of columns output by the UDTF expected <aliasesSize> aliases but got <aliasesNames>."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_1030" : {
     "message" : [
       "Window aggregate function with filter predicate is not supported yet."
@@ -3091,11 +3121,6 @@
       "Can only star expand struct data types. Attribute: `<attributes>`."
     ]
   },
-  "_LEGACY_ERROR_TEMP_1051" : {
-    "message" : [
-      "Cannot resolve '<targetString>.*' given input columns '<columns>'."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_1052" : {
     "message" : [
       "ADD COLUMN with v1 tables cannot specify NOT NULL."
@@ -3481,11 +3506,6 @@
       "Column <colName> not found in schema <tableSchema>."
     ]
   },
-  "_LEGACY_ERROR_TEMP_1157" : {
-    "message" : [
-      "Unsupported data source type for direct query on files: <className>."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_1158" : {
     "message" : [
       "Saving data into a view is not allowed."
@@ -4093,11 +4113,6 @@
       "Invalid partition transformation: <expr>."
     ]
   },
-  "_LEGACY_ERROR_TEMP_1318" : {
-    "message" : [
-      "Unable to parse '<delayThreshold>'."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_1319" : {
     "message" : [
       "Invalid join type in joinWith: <joinType>."
@@ -4164,11 +4179,6 @@
       "<errorMessage>"
     ]
   },
-  "_LEGACY_ERROR_TEMP_1333" : {
-    "message" : [
-      "Invalid view text: <viewText>. The view <tableName> may have been tampered with."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_1334" : {
     "message" : [
       "Cannot specify both version and timestamp when time travelling the table."
@@ -5504,11 +5514,6 @@
       "Number of given aliases does not match number of output columns. Function name: <funcName>; number of aliases: <aliasesNum>; number of output columns: <outColsNum>."
     ]
   },
-  "_LEGACY_ERROR_TEMP_2308" : {
-    "message" : [
-      "could not resolve `<name>` to a table-valued function."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_2309" : {
     "message" : [
       "cannot resolve <sqlExpr> in MERGE command given columns [<cols>]."
@@ -5614,11 +5619,6 @@
       "Cannot change nullable column to non-nullable: <fieldName>."
     ]
   },
-  "_LEGACY_ERROR_TEMP_2331" : {
-    "message" : [
-      "failed to evaluate expression <sqlExpr>: <msg>"
-    ]
-  },
   "_LEGACY_ERROR_TEMP_2433" : {
     "message" : [
       "Only a single table generating function is allowed in a SELECT clause, found:",
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
index 488c39d5dd8..3f07f0f5032 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/Analyzer.scala
@@ -2071,8 +2071,8 @@ class Analyzer(override val catalogManager: CatalogManager) extends RuleExecutor
           } catch {
             case _: NoSuchFunctionException =>
               u.failAnalysis(
-                errorClass = "_LEGACY_ERROR_TEMP_2308",
-                messageParameters = Map("name" -> u.name.quoted))
+                errorClass = "UNRESOLVABLE_TABLE_VALUED_FUNCTION",
+                messageParameters = Map("name" -> toSQLId(u.name)))
           }
         }
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala
index 3952ef71b64..4447b96c332 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/ResolveInlineTables.scala
@@ -25,6 +25,7 @@ import org.apache.spark.sql.catalyst.plans.logical.{LocalRelation, LogicalPlan}
 import org.apache.spark.sql.catalyst.rules.Rule
 import org.apache.spark.sql.catalyst.trees.AlwaysProcess
 import org.apache.spark.sql.catalyst.types.DataTypeUtils
+import org.apache.spark.sql.catalyst.util.TypeUtils.toSQLExpr
 import org.apache.spark.sql.types.{StructField, StructType}
 
 /**
@@ -116,8 +117,8 @@ object ResolveInlineTables extends Rule[LogicalPlan] with CastSupport with Alias
         } catch {
           case NonFatal(ex) =>
             table.failAnalysis(
-              errorClass = "_LEGACY_ERROR_TEMP_2331",
-              messageParameters = Map("sqlExpr" -> e.sql, "msg" -> ex.getMessage),
+              errorClass = "FAILED_SQL_EXPRESSION_EVALUATION",
+              messageParameters = Map("sqlExpr" -> toSQLExpr(e)),
               cause = ex)
         }
       })
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala
index 079d7564623..0141e66c479 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/unresolved.scala
@@ -25,6 +25,7 @@ import org.apache.spark.sql.catalyst.parser.CatalystSqlParser
 import org.apache.spark.sql.catalyst.plans.logical.{LeafNode, LogicalPlan, UnaryNode}
 import org.apache.spark.sql.catalyst.trees.TreePattern._
 import org.apache.spark.sql.catalyst.util._
+import org.apache.spark.sql.catalyst.util.TypeUtils.toSQLId
 import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors}
 import org.apache.spark.sql.types.{DataType, Metadata, StructType}
 import org.apache.spark.sql.util.CaseInsensitiveStringMap
@@ -479,7 +480,7 @@ case class UnresolvedStar(target: Option[Seq[String]]) extends Star with Unevalu
           throw QueryCompilationErrors.starExpandDataTypeNotSupportedError(target.get)
       }
     } else {
-      val from = input.inputSet.map(_.name).mkString(", ")
+      val from = input.inputSet.map(_.name).map(toSQLId).mkString(", ")
       val targetString = target.get.mkString(".")
       throw QueryCompilationErrors.cannotResolveStarExpandGivenInputColumnsError(
         targetString, from)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index 1b5062e985b..91ebc12b5cd 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -553,7 +553,7 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
   def aliasesNumberNotMatchUDTFOutputError(
       aliasesSize: Int, aliasesNames: String): Throwable = {
     new AnalysisException(
-      errorClass = "_LEGACY_ERROR_TEMP_1029",
+      errorClass = "UDTF_ALIAS_NUMBER_MISMATCH",
       messageParameters = Map(
         "aliasesSize" -> aliasesSize.toString,
         "aliasesNames" -> aliasesNames))
@@ -727,9 +727,9 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
   def cannotResolveStarExpandGivenInputColumnsError(
       targetString: String, columns: String): Throwable = {
     new AnalysisException(
-      errorClass = "_LEGACY_ERROR_TEMP_1051",
+      errorClass = "CANNOT_RESOLVE_STAR_EXPAND",
       messageParameters = Map(
-        "targetString" -> targetString,
+        "targetString" -> toSQLId(targetString),
         "columns" -> columns))
   }
 
@@ -1621,7 +1621,7 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
 
   def unsupportedDataSourceTypeForDirectQueryOnFilesError(className: String): Throwable = {
     new AnalysisException(
-      errorClass = "_LEGACY_ERROR_TEMP_1157",
+      errorClass = "UNSUPPORTED_DATA_SOURCE_FOR_DIRECT_QUERY",
       messageParameters = Map("className" -> className))
   }
 
@@ -2975,11 +2975,11 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
     )
   }
 
-  def cannotParseIntervalError(delayThreshold: String, e: Throwable): Throwable = {
-    val threshold = if (delayThreshold == null) "" else delayThreshold
+  def cannotParseIntervalError(intervalString: String, e: Throwable): Throwable = {
+    val threshold = if (intervalString == null) "" else intervalString
     new AnalysisException(
-      errorClass = "_LEGACY_ERROR_TEMP_1318",
-      messageParameters = Map("delayThreshold" -> threshold),
+      errorClass = "CANNOT_PARSE_INTERVAL",
+      messageParameters = Map("intervalString" -> toSQLValue(threshold, StringType)),
       cause = Some(e))
   }
 
@@ -3115,12 +3115,12 @@ private[sql] object QueryCompilationErrors extends QueryErrorsBase {
       messageParameters = Map("errorMessage" -> errorMessage))
   }
 
-  def invalidViewText(viewText: String, tableName: String): Throwable = {
+  def invalidViewText(viewText: String, viewName: String): Throwable = {
     new AnalysisException(
-      errorClass = "_LEGACY_ERROR_TEMP_1333",
+      errorClass = "INVALID_VIEW_TEXT",
       messageParameters = Map(
         "viewText" -> viewText,
-        "tableName" -> tableName))
+        "viewName" -> toSQLId(viewName)))
   }
 
   def invalidTimeTravelSpecError(): Throwable = {
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryParsingErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryParsingErrors.scala
index d2831f27e37..c451f606c23 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryParsingErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryParsingErrors.scala
@@ -229,7 +229,7 @@ private[sql] object QueryParsingErrors extends QueryErrorsBase {
   def invalidNumericLiteralRangeError(rawStrippedQualifier: String, minValue: BigDecimal,
       maxValue: BigDecimal, typeName: String, ctx: NumberContext): Throwable = {
     new ParseException(
-      errorClass = "_LEGACY_ERROR_TEMP_0023",
+      errorClass = "INVALID_NUMERIC_LITERAL_RANGE",
       messageParameters = Map(
         "rawStrippedQualifier" -> rawStrippedQualifier,
         "minValue" -> minValue.toString(),
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
index 94bbb9e0caa..5d26fa5ea81 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
@@ -1123,9 +1123,10 @@ class AnalysisErrorSuite extends AnalysisTest {
       "Scalar subquery must return only one column, but got 2" :: Nil)
 
     // t2.* cannot be resolved and the error should be the initial analysis exception.
-    assertAnalysisError(
+    assertAnalysisErrorClass(
       Project(ScalarSubquery(t0.select(star("t2"))).as("sub") :: Nil, t1),
-      "cannot resolve 't2.*' given input columns ''" :: Nil
+      expectedErrorClass = "CANNOT_RESOLVE_STAR_EXPAND",
+      expectedMessageParameters = Map("targetString" -> "`t2`", "columns" -> "")
     )
   }
 
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveSubquerySuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveSubquerySuite.scala
index 67265fe6f3b..679d6c26d2d 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveSubquerySuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/ResolveSubquerySuite.scala
@@ -219,14 +219,19 @@ class ResolveSubquerySuite extends AnalysisTest {
         LateralSubquery(Project(Seq(outerA, outerB, b, c), t2.as("t2")), Seq(a, b)), Inner, None)
     )
     // SELECT * FROM t1, LATERAL (SELECT t2.*)
-    assertAnalysisError(
+    assertAnalysisErrorClass(
       lateralJoin(t1.as("t1"), t0.select(star("t2"))),
-      Seq("cannot resolve 't2.*' given input columns ''")
+      expectedErrorClass = "CANNOT_RESOLVE_STAR_EXPAND",
+      expectedMessageParameters = Map("targetString" -> "`t2`", "columns" -> "")
     )
     // Check case sensitivities.
     // SELECT * FROM t1, LATERAL (SELECT T1.*)
     val plan = lateralJoin(t1.as("t1"), t0.select(star("T1")))
-    assertAnalysisError(plan, "cannot resolve 'T1.*' given input columns ''" :: Nil)
+    assertAnalysisErrorClass(
+      plan,
+      expectedErrorClass = "CANNOT_RESOLVE_STAR_EXPAND",
+      expectedMessageParameters = Map("targetString" -> "`T1`", "columns" -> "")
+    )
     assertAnalysisSuccess(plan, caseSensitive = false)
   }
 
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala
index 5e70402f2e7..08d3f6b3d07 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/ExpressionParserSuite.scala
@@ -730,7 +730,7 @@ class ExpressionParserSuite extends AnalysisTest {
     assertEqual("10Y", Literal(10.toByte))
     checkError(
       exception = parseException("1000Y"),
-      errorClass = "_LEGACY_ERROR_TEMP_0023",
+      errorClass = "INVALID_NUMERIC_LITERAL_RANGE",
       parameters = Map(
         "rawStrippedQualifier" -> "1000",
         "minValue" -> Byte.MinValue.toString,
@@ -745,7 +745,7 @@ class ExpressionParserSuite extends AnalysisTest {
     assertEqual("10S", Literal(10.toShort))
     checkError(
       exception = parseException("40000S"),
-      errorClass = "_LEGACY_ERROR_TEMP_0023",
+      errorClass = "INVALID_NUMERIC_LITERAL_RANGE",
       parameters = Map(
         "rawStrippedQualifier" -> "40000",
         "minValue" -> Short.MinValue.toString,
@@ -760,7 +760,7 @@ class ExpressionParserSuite extends AnalysisTest {
     assertEqual("10L", Literal(10L))
     checkError(
       exception = parseException("78732472347982492793712334L"),
-      errorClass = "_LEGACY_ERROR_TEMP_0023",
+      errorClass = "INVALID_NUMERIC_LITERAL_RANGE",
       parameters = Map(
         "rawStrippedQualifier" -> "78732472347982492793712334",
         "minValue" -> Long.MinValue.toString,
@@ -775,7 +775,7 @@ class ExpressionParserSuite extends AnalysisTest {
     assertEqual("10.0D", Literal(10.0D))
     checkError(
       exception = parseException("-1.8E308D"),
-      errorClass = "_LEGACY_ERROR_TEMP_0023",
+      errorClass = "INVALID_NUMERIC_LITERAL_RANGE",
       parameters = Map(
         "rawStrippedQualifier" -> "-1.8E308",
         "minValue" -> BigDecimal(Double.MinValue).toString,
@@ -787,7 +787,7 @@ class ExpressionParserSuite extends AnalysisTest {
         stop = 8))
     checkError(
       exception = parseException("1.8E308D"),
-      errorClass = "_LEGACY_ERROR_TEMP_0023",
+      errorClass = "INVALID_NUMERIC_LITERAL_RANGE",
       parameters = Map(
         "rawStrippedQualifier" -> "1.8E308",
         "minValue" -> BigDecimal(Double.MinValue).toString,
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/ansi/literals.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/ansi/literals.sql.out
index 9e908ad798e..dd3cf597c95 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/ansi/literals.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/ansi/literals.sql.out
@@ -32,7 +32,7 @@ select 128Y
 -- !query analysis
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "127",
     "minValue" : "-128",
@@ -68,7 +68,7 @@ select 32768S
 -- !query analysis
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "32767",
     "minValue" : "-32768",
@@ -104,7 +104,7 @@ select 9223372036854775808L
 -- !query analysis
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "9223372036854775807",
     "minValue" : "-9223372036854775808",
@@ -196,7 +196,7 @@ select -3.4028235E39f
 -- !query analysis
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "3.4028234663852886E+38",
     "minValue" : "-3.4028234663852886E+38",
@@ -246,7 +246,7 @@ select 1E309, -1E309
 -- !query analysis
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "1.7976931348623157E+308",
     "minValue" : "-1.7976931348623157E+308",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/columnresolution-negative.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/columnresolution-negative.sql.out
index 61a366e862c..95f3e53ff60 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/columnresolution-negative.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/columnresolution-negative.sql.out
@@ -312,10 +312,10 @@ SELECT t1.x.y.* FROM t1
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_1051",
+  "errorClass" : "CANNOT_RESOLVE_STAR_EXPAND",
   "messageParameters" : {
-    "columns" : "i1",
-    "targetString" : "t1.x.y"
+    "columns" : "`i1`",
+    "targetString" : "`t1`.`x`.`y`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/join-lateral.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/join-lateral.sql.out
index fe598f886ad..74c25e11bd9 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/join-lateral.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/join-lateral.sql.out
@@ -177,10 +177,10 @@ SELECT * FROM t1, LATERAL (SELECT t1.*, t2.* FROM t2, LATERAL (SELECT t1.*, t2.*
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_1051",
+  "errorClass" : "CANNOT_RESOLVE_STAR_EXPAND",
   "messageParameters" : {
-    "columns" : "c1, c2",
-    "targetString" : "t1"
+    "columns" : "`c1`, `c2`",
+    "targetString" : "`t1`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/literals.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/literals.sql.out
index 9e908ad798e..dd3cf597c95 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/literals.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/literals.sql.out
@@ -32,7 +32,7 @@ select 128Y
 -- !query analysis
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "127",
     "minValue" : "-128",
@@ -68,7 +68,7 @@ select 32768S
 -- !query analysis
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "32767",
     "minValue" : "-32768",
@@ -104,7 +104,7 @@ select 9223372036854775808L
 -- !query analysis
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "9223372036854775807",
     "minValue" : "-9223372036854775808",
@@ -196,7 +196,7 @@ select -3.4028235E39f
 -- !query analysis
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "3.4028234663852886E+38",
     "minValue" : "-3.4028234663852886E+38",
@@ -246,7 +246,7 @@ select 1E309, -1E309
 -- !query analysis
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "1.7976931348623157E+308",
     "minValue" : "-1.7976931348623157E+308",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/boolean.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/boolean.sql.out
index 355ccef60ac..adf7bcda741 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/boolean.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/boolean.sql.out
@@ -414,10 +414,9 @@ INSERT INTO BOOLTBL2
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2331",
+  "errorClass" : "FAILED_SQL_EXPRESSION_EVALUATION",
   "messageParameters" : {
-    "msg" : "[CAST_INVALID_INPUT] The value 'XXX' of the type \"STRING\" cannot be cast to \"BOOLEAN\" because it is malformed. Correct the value as per the syntax, or change its target type. Use `try_cast` to tolerate malformed input and return NULL instead. If necessary set \"spark.sql.ansi.enabled\" to \"false\" to bypass this error.\n== SQL(line 2, position 12) ==\n   VALUES (boolean('XXX'))\n           ^^^^^^^^^^^^^^\n",
-    "sqlExpr" : "CAST('XXX' AS BOOLEAN)"
+    "sqlExpr" : "\"XXX\""
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/window_part3.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/window_part3.sql.out
index 53927b549bb..27fafeb38dc 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/window_part3.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/window_part3.sql.out
@@ -65,10 +65,9 @@ insert into datetimes values
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2331",
+  "errorClass" : "FAILED_SQL_EXPRESSION_EVALUATION",
   "messageParameters" : {
-    "msg" : "[CAST_INVALID_INPUT] The value '11:00 BST' of the type \"STRING\" cannot be cast to \"TIMESTAMP\" because it is malformed. Correct the value as per the syntax, or change its target type. Use `try_cast` to tolerate malformed input and return NULL instead. If necessary set \"spark.sql.ansi.enabled\" to \"false\" to bypass this error.\n== SQL(line 2, position 24) ==\n(1, timestamp '11:00', cast ('11:00 BST' as timestamp), cast ('1 year' as timestamp), ...\n [...]
-    "sqlExpr" : "CAST('11:00 BST' AS TIMESTAMP)"
+    "sqlExpr" : "\"CAST(11:00 BST AS TIMESTAMP)\""
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/window_part4.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/window_part4.sql.out
index ed6cca796a5..12f754620de 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/window_part4.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/window_part4.sql.out
@@ -500,10 +500,9 @@ FROM (VALUES(1,1),(2,2),(3,(cast('nan' as int))),(4,3),(5,4)) t(a,b)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2331",
+  "errorClass" : "FAILED_SQL_EXPRESSION_EVALUATION",
   "messageParameters" : {
-    "msg" : "[CAST_INVALID_INPUT] The value 'nan' of the type \"STRING\" cannot be cast to \"INT\" because it is malformed. Correct the value as per the syntax, or change its target type. Use `try_cast` to tolerate malformed input and return NULL instead. If necessary set \"spark.sql.ansi.enabled\" to \"false\" to bypass this error.\n== SQL(line 3, position 29) ==\nFROM (VALUES(1,1),(2,2),(3,(cast('nan' as int))),(4,3),(5,4)) t(a,b)\n                            ^^^^^^^^^^^^^^^^^^\n",
-    "sqlExpr" : "CAST('nan' AS INT)"
+    "sqlExpr" : "\"CAST(nan AS INT)\""
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/table-valued-functions.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/table-valued-functions.sql.out
index 3342d5672c5..d9e78315d92 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/table-valued-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/table-valued-functions.sql.out
@@ -4,9 +4,9 @@ select * from dummy(3)
 -- !query analysis
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2308",
+  "errorClass" : "UNRESOLVABLE_TABLE_VALUED_FUNCTION",
   "messageParameters" : {
-    "name" : "dummy"
+    "name" : "`dummy`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/ansi/literals.sql.out b/sql/core/src/test/resources/sql-tests/results/ansi/literals.sql.out
index 402b51c1fdc..56f11497003 100644
--- a/sql/core/src/test/resources/sql-tests/results/ansi/literals.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/ansi/literals.sql.out
@@ -38,7 +38,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "127",
     "minValue" : "-128",
@@ -78,7 +78,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "32767",
     "minValue" : "-32768",
@@ -118,7 +118,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "9223372036854775807",
     "minValue" : "-9223372036854775808",
@@ -222,7 +222,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "3.4028234663852886E+38",
     "minValue" : "-3.4028234663852886E+38",
@@ -278,7 +278,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "1.7976931348623157E+308",
     "minValue" : "-1.7976931348623157E+308",
diff --git a/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out b/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out
index 54397c8da08..4d700b0a142 100644
--- a/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out
@@ -347,10 +347,10 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_1051",
+  "errorClass" : "CANNOT_RESOLVE_STAR_EXPAND",
   "messageParameters" : {
-    "columns" : "i1",
-    "targetString" : "t1.x.y"
+    "columns" : "`i1`",
+    "targetString" : "`t1`.`x`.`y`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/join-lateral.sql.out b/sql/core/src/test/resources/sql-tests/results/join-lateral.sql.out
index 85933e3e732..cc7b44ca8e8 100644
--- a/sql/core/src/test/resources/sql-tests/results/join-lateral.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/join-lateral.sql.out
@@ -131,10 +131,10 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_1051",
+  "errorClass" : "CANNOT_RESOLVE_STAR_EXPAND",
   "messageParameters" : {
-    "columns" : "c1, c2",
-    "targetString" : "t1"
+    "columns" : "`c1`, `c2`",
+    "targetString" : "`t1`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/literals.sql.out b/sql/core/src/test/resources/sql-tests/results/literals.sql.out
index 402b51c1fdc..56f11497003 100644
--- a/sql/core/src/test/resources/sql-tests/results/literals.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/literals.sql.out
@@ -38,7 +38,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "127",
     "minValue" : "-128",
@@ -78,7 +78,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "32767",
     "minValue" : "-32768",
@@ -118,7 +118,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "9223372036854775807",
     "minValue" : "-9223372036854775808",
@@ -222,7 +222,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "3.4028234663852886E+38",
     "minValue" : "-3.4028234663852886E+38",
@@ -278,7 +278,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_0023",
+  "errorClass" : "INVALID_NUMERIC_LITERAL_RANGE",
   "messageParameters" : {
     "maxValue" : "1.7976931348623157E+308",
     "minValue" : "-1.7976931348623157E+308",
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out
index a47de345a67..44efe4614e9 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/boolean.sql.out
@@ -717,10 +717,9 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2331",
+  "errorClass" : "FAILED_SQL_EXPRESSION_EVALUATION",
   "messageParameters" : {
-    "msg" : "[CAST_INVALID_INPUT] The value 'XXX' of the type \"STRING\" cannot be cast to \"BOOLEAN\" because it is malformed. Correct the value as per the syntax, or change its target type. Use `try_cast` to tolerate malformed input and return NULL instead. If necessary set \"spark.sql.ansi.enabled\" to \"false\" to bypass this error.\n== SQL(line 2, position 12) ==\n   VALUES (boolean('XXX'))\n           ^^^^^^^^^^^^^^\n",
-    "sqlExpr" : "CAST('XXX' AS BOOLEAN)"
+    "sqlExpr" : "\"XXX\""
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
index 6eccdcb89e1..7b738505be1 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part3.sql.out
@@ -70,10 +70,9 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2331",
+  "errorClass" : "FAILED_SQL_EXPRESSION_EVALUATION",
   "messageParameters" : {
-    "msg" : "[CAST_INVALID_INPUT] The value '11:00 BST' of the type \"STRING\" cannot be cast to \"TIMESTAMP\" because it is malformed. Correct the value as per the syntax, or change its target type. Use `try_cast` to tolerate malformed input and return NULL instead. If necessary set \"spark.sql.ansi.enabled\" to \"false\" to bypass this error.\n== SQL(line 2, position 24) ==\n(1, timestamp '11:00', cast ('11:00 BST' as timestamp), cast ('1 year' as timestamp), ...\n [...]
-    "sqlExpr" : "CAST('11:00 BST' AS TIMESTAMP)"
+    "sqlExpr" : "\"CAST(11:00 BST AS TIMESTAMP)\""
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out
index edf3e7b5614..8ba267f6618 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/window_part4.sql.out
@@ -499,10 +499,9 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2331",
+  "errorClass" : "FAILED_SQL_EXPRESSION_EVALUATION",
   "messageParameters" : {
-    "msg" : "[CAST_INVALID_INPUT] The value 'nan' of the type \"STRING\" cannot be cast to \"INT\" because it is malformed. Correct the value as per the syntax, or change its target type. Use `try_cast` to tolerate malformed input and return NULL instead. If necessary set \"spark.sql.ansi.enabled\" to \"false\" to bypass this error.\n== SQL(line 3, position 29) ==\nFROM (VALUES(1,1),(2,2),(3,(cast('nan' as int))),(4,3),(5,4)) t(a,b)\n                            ^^^^^^^^^^^^^^^^^^\n",
-    "sqlExpr" : "CAST('nan' AS INT)"
+    "sqlExpr" : "\"CAST(nan AS INT)\""
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out b/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out
index 64ae32da28a..2703df66d2b 100644
--- a/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/table-valued-functions.sql.out
@@ -6,9 +6,9 @@ struct<>
 -- !query output
 org.apache.spark.sql.AnalysisException
 {
-  "errorClass" : "_LEGACY_ERROR_TEMP_2308",
+  "errorClass" : "UNRESOLVABLE_TABLE_VALUED_FUNCTION",
   "messageParameters" : {
-    "name" : "dummy"
+    "name" : "`dummy`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
index 381c7714402..089464dd569 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLQuerySuite.scala
@@ -1822,8 +1822,8 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
           exception = intercept[AnalysisException]{
             sql("SELECT abc.* FROM nestedStructTable")
           },
-          errorClass = "_LEGACY_ERROR_TEMP_1051",
-          parameters = Map("targetString" -> "abc", "columns" -> "record"),
+          errorClass = "CANNOT_RESOLVE_STAR_EXPAND",
+          parameters = Map("targetString" -> "`abc`", "columns" -> "`record`"),
           context = ExpectedContext(fragment = "abc.*", start = 7, stop = 11))
       }
 
@@ -1911,16 +1911,16 @@ class SQLQuerySuite extends QueryTest with SharedSparkSession with AdaptiveSpark
         exception = intercept[AnalysisException] {
           sql("SELECT a.* FROM temp_table_no_cols a")
         },
-        errorClass = "_LEGACY_ERROR_TEMP_1051",
-        parameters = Map("targetString" -> "a", "columns" -> ""),
+        errorClass = "CANNOT_RESOLVE_STAR_EXPAND",
+        parameters = Map("targetString" -> "`a`", "columns" -> ""),
         context = ExpectedContext(fragment = "a.*", start = 7, stop = 9))
 
       checkError(
         exception = intercept[AnalysisException] {
           dfNoCols.select($"b.*")
         },
-        errorClass = "_LEGACY_ERROR_TEMP_1051",
-        parameters = Map("targetString" -> "b", "columns" -> ""))
+        errorClass = "CANNOT_RESOLVE_STAR_EXPAND",
+        parameters = Map("targetString" -> "`b`", "columns" -> ""))
     }
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
index d992a8aa08b..bde731e195f 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
@@ -2597,10 +2597,10 @@ class DataSourceV2SQLSuiteV1Filter
         exception = intercept[AnalysisException] {
           sql(s"SELECT ns1.ns2.ns3.tbl.* from $t")
         },
-        errorClass = "_LEGACY_ERROR_TEMP_1051",
+        errorClass = "CANNOT_RESOLVE_STAR_EXPAND",
         parameters = Map(
-          "targetString" -> "ns1.ns2.ns3.tbl",
-          "columns" -> "id, name"),
+          "targetString" -> "`ns1`.`ns2`.`ns3`.`tbl`",
+          "columns" -> "`id`, `name`"),
         context = ExpectedContext(fragment = "ns1.ns2.ns3.tbl.*", start = 7, stop = 23))
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewTestSuite.scala
index c08e48baa57..2956a6345bf 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/SQLViewTestSuite.scala
@@ -728,8 +728,8 @@ class PersistedViewTestSuite extends SQLViewTestSuite with SharedSparkSession {
         exception = intercept[AnalysisException] {
           sql("SELECT * FROM v")
         },
-        errorClass = "_LEGACY_ERROR_TEMP_1333",
-        parameters = Map("viewText" -> "DROP VIEW v", "tableName" -> "spark_catalog.default.v")
+        errorClass = "INVALID_VIEW_TEXT",
+        parameters = Map("viewText" -> "DROP VIEW v", "viewName" -> "`spark_catalog`.`default`.`v`")
       )
     }
   }

