This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 2ff6914e6ba [SPARK-38913][SQL] Output identifiers in error messages in SQL style
2ff6914e6ba is described below

commit 2ff6914e6bac053231825c083fd508726a11a349
Author: Max Gekk <max.g...@gmail.com>
AuthorDate: Sun Apr 17 18:28:49 2022 +0300

    [SPARK-38913][SQL] Output identifiers in error messages in SQL style
    
    ### What changes were proposed in this pull request?
    In the PR, I propose to use backticks to wrap SQL identifiers in error messages. I added new util functions `toSQLId()` to the trait `QueryErrorsBase`, and applied it in `Query.*Errors` (also modified tests in `Query.*ErrorsSuite`). For example:
    
    Before:
    ```
    Invalid SQL syntax: The definition of window win is repetitive.
    ```
    
    After:
    ```
    Invalid SQL syntax: The definition of window `win` is repetitive.
    ```
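
    For reference, here is a minimal, self-contained Scala sketch of the new quoting helpers. The real `toSQLId()` overloads live in `QueryErrorsBase` (see the diff below) and reuse `org.apache.spark.sql.catalyst.util.quoteIdentifier`; the inlined `quoteIdentifier` here is an assumed simplification for illustration only.

    ```scala
    object ToSQLIdSketch {
      // Assumed behavior of catalyst's quoteIdentifier: wrap one identifier
      // part in backticks, doubling any embedded backticks.
      private def quoteIdentifier(name: String): String =
        s"`${name.replace("`", "``")}`"

      // Mirrors the new QueryErrorsBase helpers: quote each part and join
      // multi-part identifiers with dots.
      def toSQLId(parts: Seq[String]): String =
        parts.map(quoteIdentifier).mkString(".")

      def toSQLId(parts: String): String =
        toSQLId(parts.split("\\.").toSeq)

      def main(args: Array[String]): Unit = {
        println(toSQLId("win"))        // prints: `win`
        println(toSQLId("ns.db.func")) // prints: `ns`.`db`.`func`
      }
    }
    ```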
    
    ### Why are the changes needed?
    To improve user experience with Spark SQL. The changes highlight SQL identifiers in error messages and make them more visible to users.
    
    ### Does this PR introduce _any_ user-facing change?
    No, since error classes haven't been released yet.
    
    ### How was this patch tested?
    By running the affected test suites:
    ```
    $ build/sbt "test:testOnly *QueryParsingErrorsSuite"
    $ build/sbt "test:testOnly *QueryCompilationErrorsSuite"
    $ build/sbt "test:testOnly *QueryCompilationErrorsDSv2Suite"
    $ build/sbt "test:testOnly *QueryExecutionErrorsSuite"
    $ build/sbt "testOnly *PlanParserSuite"
    $ build/sbt "testOnly *DDLParserSuite"
    $ build/sbt -Phive-2.3 "testOnly *HiveSQLInsertTestSuite"
    $ build/sbt "sql/testOnly org.apache.spark.sql.SQLQueryTestSuite -- -z window.sql"
    $ build/sbt "testOnly *DSV2SQLInsertTestSuite"
    ```
    
    Closes #36210 from MaxGekk/error-class-toSQLId.
    
    Authored-by: Max Gekk <max.g...@gmail.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 core/src/main/resources/error/error-classes.json   |  2 +-
 .../sql/tests/test_pandas_udf_grouped_agg.py       |  2 +-
 .../spark/sql/errors/QueryCompilationErrors.scala  |  9 ++--
 .../apache/spark/sql/errors/QueryErrorsBase.scala  |  9 ++++
 .../spark/sql/errors/QueryExecutionErrors.scala    |  8 ++--
 .../spark/sql/errors/QueryParsingErrors.scala      | 32 +++++++------
 .../spark/sql/catalyst/parser/DDLParserSuite.scala |  6 ---
 .../sql/catalyst/parser/PlanParserSuite.scala      | 12 -----
 .../spark/sql/execution/SparkSqlParser.scala       |  2 +-
 .../resources/sql-tests/results/window.sql.out     |  2 +-
 .../org/apache/spark/sql/SQLInsertTestSuite.scala  |  2 +-
 .../errors/QueryCompilationErrorsDSv2Suite.scala   |  6 +--
 .../sql/errors/QueryCompilationErrorsSuite.scala   |  2 +-
 .../sql/errors/QueryExecutionErrorsSuite.scala     |  8 ++--
 .../spark/sql/errors/QueryParsingErrorsSuite.scala | 54 ++++++++++++++++------
 .../sql/execution/command/DDLParserSuite.scala     | 15 ------
 16 files changed, 90 insertions(+), 81 deletions(-)

diff --git a/core/src/main/resources/error/error-classes.json b/core/src/main/resources/error/error-classes.json
index 397af26302b..a98cd6fc211 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -38,7 +38,7 @@
     "sqlState" : "22012"
   },
   "DUPLICATE_KEY" : {
-    "message" : [ "Found duplicate keys '%s'" ],
+    "message" : [ "Found duplicate keys %s" ],
     "sqlState" : "23000"
   },
   "FAILED_EXECUTE_UDF" : {
diff --git a/python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py b/python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py
index fc9be7c2943..56fad6fc1ac 100644
--- a/python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py
+++ b/python/pyspark/sql/tests/test_pandas_udf_grouped_agg.py
@@ -487,7 +487,7 @@ class GroupedAggPandasUDFTests(ReusedSQLTestCase):
         with QuietTest(self.sc):
             with self.assertRaisesRegex(
                 AnalysisException,
-                "The group aggregate pandas UDF 'avg' cannot be invoked together with as other, "
+                "The group aggregate pandas UDF `avg` cannot be invoked together with as other, "
                 "non-pandas aggregate functions.",
             ):
                 df.groupby(df.id).agg(mean_udf(df.v), mean(df.v)).collect()
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index 479127d8a6b..6b32a08b6fd 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -46,7 +46,7 @@ import org.apache.spark.sql.types._
  * As commands are executed eagerly, this also includes errors thrown during the execution of
  * commands, which users can see immediately.
  */
-object QueryCompilationErrors {
+object QueryCompilationErrors extends QueryErrorsBase {
 
   def groupingIDMismatchError(groupingID: GroupingID, groupByExprs: Seq[Expression]): Throwable = {
     new AnalysisException(
@@ -94,13 +94,14 @@ object QueryCompilationErrors {
   def unsupportedIfNotExistsError(tableName: String): Throwable = {
     new AnalysisException(
       errorClass = "UNSUPPORTED_FEATURE",
-      messageParameters = Array(s"IF NOT EXISTS for the table '$tableName' by INSERT INTO."))
+      messageParameters = Array(
+        s"IF NOT EXISTS for the table ${toSQLId(tableName)} by INSERT INTO."))
   }
 
   def nonPartitionColError(partitionName: String): Throwable = {
     new AnalysisException(
       errorClass = "NON_PARTITION_COLUMN",
-      messageParameters = Array(partitionName))
+      messageParameters = Array(toSQLId(partitionName)))
   }
 
   def missingStaticPartitionColumn(staticName: String): Throwable = {
@@ -1346,7 +1347,7 @@ object QueryCompilationErrors {
       groupAggPandasUDFNames: Seq[String]): Throwable = {
     new AnalysisException(
       errorClass = "INVALID_PANDAS_UDF_PLACEMENT",
-      messageParameters = Array(groupAggPandasUDFNames.map(name => s"'$name'").mkString(", ")))
+      messageParameters = Array(groupAggPandasUDFNames.map(toSQLId).mkString(", ")))
   }
 
   def ambiguousAttributesInSelfJoinError(
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
index e69e1382ecf..9b18b59c33d 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryErrorsBase.scala
@@ -18,6 +18,7 @@
 package org.apache.spark.sql.errors
 
 import org.apache.spark.sql.catalyst.expressions.Literal
+import org.apache.spark.sql.catalyst.util.quoteIdentifier
 import org.apache.spark.sql.types.{DataType, DoubleType, FloatType}
 
 trait QueryErrorsBase {
@@ -44,4 +45,12 @@ trait QueryErrorsBase {
   def toSQLValue(v: Any, t: DataType): String = {
     litToErrorValue(Literal.create(v, t))
   }
+
+  def toSQLId(parts: Seq[String]): String = {
+    parts.map(quoteIdentifier).mkString(".")
+  }
+
+  def toSQLId(parts: String): String = {
+    toSQLId(parts.split("\\."))
+  }
 }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
index e259103382e..1aef33c6cc2 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
@@ -1894,12 +1894,14 @@ object QueryExecutionErrors extends QueryErrorsBase {
       messageParameters = Array("Pivot not after a groupBy."))
   }
 
+  private val aesFuncName = toSQLId("aes_encrypt") + "/" + toSQLId("aes_decrypt")
+
   def invalidAesKeyLengthError(actualLength: Int): RuntimeException = {
     new SparkRuntimeException(
       errorClass = "INVALID_PARAMETER_VALUE",
       messageParameters = Array(
         "key",
-        "the aes_encrypt/aes_decrypt function",
+        s"the $aesFuncName function",
         s"expects a binary value with 16, 24 or 32 bytes, but got ${actualLength.toString} bytes."))
   }
 
@@ -1907,7 +1909,7 @@ object QueryExecutionErrors extends QueryErrorsBase {
     new SparkRuntimeException(
       errorClass = "UNSUPPORTED_FEATURE",
       messageParameters = Array(
-        s"AES-$mode with the padding $padding by the aes_encrypt/aes_decrypt function."))
+        s"AES-$mode with the padding $padding by the $aesFuncName function."))
   }
 
   def aesCryptoError(detailMessage: String): RuntimeException = {
@@ -1915,7 +1917,7 @@ object QueryExecutionErrors extends QueryErrorsBase {
       errorClass = "INVALID_PARAMETER_VALUE",
       messageParameters = Array(
         "expr, key",
-        "the aes_encrypt/aes_decrypt function",
+        s"the $aesFuncName function",
         s"Detail message: $detailMessage"))
   }
 
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryParsingErrors.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryParsingErrors.scala
index 69e118d88bd..ba9c9ecbade 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryParsingErrors.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryParsingErrors.scala
@@ -22,13 +22,12 @@ import org.antlr.v4.runtime.ParserRuleContext
 import org.apache.spark.sql.catalyst.parser.ParseException
 import org.apache.spark.sql.catalyst.parser.SqlBaseParser._
 import org.apache.spark.sql.catalyst.trees.Origin
-import org.apache.spark.sql.connector.catalog.CatalogV2Implicits._
 
 /**
  * Object for grouping all error messages of the query parsing.
  * Currently it includes all ParseException.
  */
-object QueryParsingErrors {
+object QueryParsingErrors extends QueryErrorsBase {
 
   def invalidInsertIntoError(ctx: InsertIntoContext): Throwable = {
     new ParseException("Invalid InsertIntoContext", ctx)
@@ -121,17 +120,17 @@ object QueryParsingErrors {
 
   def repetitiveWindowDefinitionError(name: String, ctx: WindowClauseContext): Throwable = {
     new ParseException("INVALID_SQL_SYNTAX",
-      Array(s"The definition of window '$name' is repetitive."), ctx)
+      Array(s"The definition of window ${toSQLId(name)} is repetitive."), ctx)
   }
 
   def invalidWindowReferenceError(name: String, ctx: WindowClauseContext): Throwable = {
     new ParseException("INVALID_SQL_SYNTAX",
-      Array(s"Window reference '$name' is not a window specification."), ctx)
+      Array(s"Window reference ${toSQLId(name)} is not a window specification."), ctx)
   }
 
   def cannotResolveWindowReferenceError(name: String, ctx: WindowClauseContext): Throwable = {
     new ParseException("INVALID_SQL_SYNTAX",
-      Array(s"Cannot resolve window reference '$name'."), ctx)
+      Array(s"Cannot resolve window reference ${toSQLId(name)}."), ctx)
   }
 
   def naturalCrossJoinUnsupportedError(ctx: RelationContext): Throwable = {
@@ -162,7 +161,7 @@ object QueryParsingErrors {
 
   def functionNameUnsupportedError(functionName: String, ctx: ParserRuleContext): Throwable = {
     new ParseException("INVALID_SQL_SYNTAX",
-      Array(s"Unsupported function name '$functionName'"), ctx)
+      Array(s"Unsupported function name ${toSQLId(functionName)}"), ctx)
   }
 
   def cannotParseValueTypeError(
@@ -231,7 +230,10 @@ object QueryParsingErrors {
   }
 
   def tooManyArgumentsForTransformError(name: String, ctx: ApplyTransformContext): Throwable = {
-    new ParseException("INVALID_SQL_SYNTAX", Array(s"Too many arguments for transform $name"), ctx)
+    new ParseException(
+      errorClass = "INVALID_SQL_SYNTAX",
+      messageParameters = Array(s"Too many arguments for transform ${toSQLId(name)}"),
+      ctx)
   }
 
   def invalidBucketsNumberError(describe: String, ctx: ApplyTransformContext): Throwable = {
@@ -299,12 +301,13 @@ object QueryParsingErrors {
 
   def showFunctionsUnsupportedError(identifier: String, ctx: IdentifierContext): Throwable = {
     new ParseException("INVALID_SQL_SYNTAX",
-      Array(s"SHOW $identifier FUNCTIONS not supported"), ctx)
+      Array(s"SHOW ${toSQLId(identifier)} FUNCTIONS not supported"), ctx)
   }
 
   def showFunctionsInvalidPatternError(pattern: String, ctx: ParserRuleContext): Throwable = {
     new ParseException("INVALID_SQL_SYNTAX",
-      Array(s"Invalid pattern in SHOW FUNCTIONS: $pattern. It must be a string literal."), ctx)
+      Array(s"Invalid pattern in SHOW FUNCTIONS: ${toSQLId(pattern)}. " +
+        "It must be a string literal."), ctx)
   }
 
   def duplicateCteDefinitionNamesError(duplicateNames: String, ctx: CtesContext): Throwable = {
@@ -326,7 +329,7 @@ object QueryParsingErrors {
 
   def duplicateKeysError(key: String, ctx: ParserRuleContext): Throwable = {
     // Found duplicate keys '$key'
-    new ParseException(errorClass = "DUPLICATE_KEY", messageParameters = Array(key), ctx)
+    new ParseException(errorClass = "DUPLICATE_KEY", messageParameters = Array(toSQLId(key)), ctx)
   }
 
   def unexpectedFomatForSetConfigurationError(ctx: ParserRuleContext): Throwable = {
@@ -418,9 +421,9 @@ object QueryParsingErrors {
       Array("It is not allowed to define a TEMPORARY function with IF NOT EXISTS."), ctx)
   }
 
-  def unsupportedFunctionNameError(quoted: String, ctx: CreateFunctionContext): Throwable = {
+  def unsupportedFunctionNameError(funcName: Seq[String], ctx: CreateFunctionContext): Throwable = {
     new ParseException("INVALID_SQL_SYNTAX",
-      Array(s"Unsupported function name '$quoted'"), ctx)
+      Array(s"Unsupported function name ${toSQLId(funcName)}"), ctx)
   }
 
   def specifyingDBInCreateTempFuncError(
@@ -428,7 +431,8 @@ object QueryParsingErrors {
       ctx: CreateFunctionContext): Throwable = {
     new ParseException(
       "INVALID_SQL_SYNTAX",
-      Array(s"Specifying a database in CREATE TEMPORARY FUNCTION is not allowed: '$databaseName'"),
+      Array("Specifying a database in CREATE TEMPORARY FUNCTION is not allowed: " +
+        toSQLId(databaseName)),
       ctx)
   }
 
@@ -442,7 +446,7 @@ object QueryParsingErrors {
 
   def invalidNameForDropTempFunc(name: Seq[String], ctx: ParserRuleContext): Throwable = {
     new ParseException("INVALID_SQL_SYNTAX",
-      Array(s"DROP TEMPORARY FUNCTION requires a single part name but got: ${name.quoted}"), ctx)
+      Array(s"DROP TEMPORARY FUNCTION requires a single part name but got: ${toSQLId(name)}"), ctx)
   }
 
   def defaultColumnNotImplementedYetError(ctx: ParserRuleContext): Throwable = {
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
index e6ae07310ac..c350125d1ce 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/DDLParserSuite.scala
@@ -2049,12 +2049,6 @@ class DDLParserSuite extends AnalysisTest {
     comparePlans(
       parsePlan("SHOW FUNCTIONS IN db LIKE 'funct*'"),
       ShowFunctions(UnresolvedNamespace(Seq("db")), true, true, Some("funct*")))
-    val sql = "SHOW other FUNCTIONS"
-    intercept(sql, s"$sql not supported")
-    intercept("SHOW FUNCTIONS IN db f1",
-      "Invalid pattern in SHOW FUNCTIONS: f1")
-    intercept("SHOW FUNCTIONS IN db LIKE f1",
-      "Invalid pattern in SHOW FUNCTIONS: f1")
 
     // The legacy syntax.
     comparePlans(
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala
index a138b907b29..3d0d2fea7be 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/parser/PlanParserSuite.scala
@@ -437,18 +437,6 @@ class PlanParserSuite extends AnalysisTest {
          |       w2 as w1,
          |       w3 as w1""".stripMargin,
       WithWindowDefinition(ws1, plan))
-
-    // Fail with no reference.
-    intercept(s"$sql window w2 as w1", "Cannot resolve window reference 'w1'")
-
-    // Fail when resolved reference is not a window spec.
-    intercept(
-      s"""$sql
-         |window w1 as (partition by a, b order by c rows between 1 preceding and 1 following),
-         |       w2 as w1,
-         |       w3 as w2""".stripMargin,
-      "Window reference 'w2' is not a window specification"
-    )
   }
 
   test("lateral view") {
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index 86756f5eb57..752b3711283 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -563,7 +563,7 @@ class SparkSqlAstBuilder extends AstBuilder {
       }
 
       if (functionIdentifier.length > 2) {
-        throw QueryParsingErrors.unsupportedFunctionNameError(functionIdentifier.quoted, ctx)
+        throw QueryParsingErrors.unsupportedFunctionNameError(functionIdentifier, ctx)
       } else if (functionIdentifier.length == 2) {
         // Temporary function names should not contain database prefix like "database.function"
         throw QueryParsingErrors.specifyingDBInCreateTempFuncError(functionIdentifier.head, ctx)
diff --git a/sql/core/src/test/resources/sql-tests/results/window.sql.out b/sql/core/src/test/resources/sql-tests/results/window.sql.out
index b5b6895b099..bac00d85c67 100644
--- a/sql/core/src/test/resources/sql-tests/results/window.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/window.sql.out
@@ -898,7 +898,7 @@ struct<>
 -- !query output
 org.apache.spark.sql.catalyst.parser.ParseException
 
-Invalid SQL syntax: The definition of window 'w' is repetitive.(line 8, pos 0)
+Invalid SQL syntax: The definition of window `w` is repetitive.(line 8, pos 0)
 
 == SQL ==
 SELECT
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
index ab5c66dfec7..3ff526bd9db 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/SQLInsertTestSuite.scala
@@ -276,7 +276,7 @@ trait SQLInsertTestSuite extends QueryTest with SQLTestUtils {
       val e = intercept[AnalysisException] {
         sql("INSERT OVERWRITE t PARTITION (c='2', C='3') VALUES (1)")
       }
-      assert(e.getMessage.contains("Found duplicate keys 'c'"))
+      assert(e.getMessage.contains("Found duplicate keys `c`"))
     }
     // The following code is skipped for Hive because columns stored in Hive Metastore is always
     // case insensitive and we cannot create such table in Hive Metastore.
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsDSv2Suite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsDSv2Suite.scala
index 042f130d7f5..d58c2b56293 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsDSv2Suite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsDSv2Suite.scala
@@ -52,7 +52,7 @@ class QueryCompilationErrorsDSv2Suite
 
         checkAnswer(spark.table(tbl), spark.emptyDataFrame)
         assert(e.getMessage === "The feature is not supported: " +
-          s"IF NOT EXISTS for the table '$tbl' by INSERT INTO.")
+          s"IF NOT EXISTS for the table `testcat`.`ns1`.`ns2`.`tbl` by INSERT INTO.")
         assert(e.getErrorClass === "UNSUPPORTED_FEATURE")
         assert(e.getSqlState === "0A000")
       }
@@ -69,7 +69,7 @@ class QueryCompilationErrorsDSv2Suite
       }
 
       verifyTable(t1, spark.emptyDataFrame)
-      assert(e.getMessage === "PARTITION clause cannot contain a non-partition column name: id")
+      assert(e.getMessage === "PARTITION clause cannot contain a non-partition column name: `id`")
       assert(e.getErrorClass === "NON_PARTITION_COLUMN")
     }
   }
@@ -84,7 +84,7 @@ class QueryCompilationErrorsDSv2Suite
       }
 
       verifyTable(t1, spark.emptyDataFrame)
-      assert(e.getMessage === "PARTITION clause cannot contain a non-partition column name: data")
+      assert(e.getMessage === "PARTITION clause cannot contain a non-partition column name: `data`")
       assert(e.getErrorClass === "NON_PARTITION_COLUMN")
     }
   }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
index de671df74c8..9eb8f98ed55 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
@@ -126,7 +126,7 @@ class QueryCompilationErrorsSuite extends QueryTest with SharedSparkSession {
 
     assert(e.errorClass === Some("INVALID_PANDAS_UDF_PLACEMENT"))
     assert(e.message ===
-      "The group aggregate pandas UDF 'pandas_udf_1', 'pandas_udf_2' cannot be invoked " +
+      "The group aggregate pandas UDF `pandas_udf_1`, `pandas_udf_2` cannot be invoked " +
       "together with as other, non-pandas aggregate functions.")
   }
 
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
index 09f655431dc..8f797210904 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
@@ -66,8 +66,8 @@ class QueryExecutionErrorsSuite extends QueryTest
       assert(e.getErrorClass === "INVALID_PARAMETER_VALUE")
       assert(e.getSqlState === "22023")
       assert(e.getMessage.matches(
-        "The value of parameter\\(s\\) 'key' in the aes_encrypt/aes_decrypt function is invalid: " +
-        "expects a binary value with 16, 24 or 32 bytes, but got \\d+ bytes."))
+        "The value of parameter\\(s\\) 'key' in the `aes_encrypt`/`aes_decrypt` function " +
+        "is invalid: expects a binary value with 16, 24 or 32 bytes, but got \\d+ bytes."))
     }
 
     // Encryption failure - invalid key length
@@ -100,7 +100,7 @@ class QueryExecutionErrorsSuite extends QueryTest
       assert(e.getErrorClass === "INVALID_PARAMETER_VALUE")
       assert(e.getSqlState === "22023")
       assert(e.getMessage ===
-        "The value of parameter(s) 'expr, key' in the aes_encrypt/aes_decrypt function " +
+        "The value of parameter(s) 'expr, key' in the `aes_encrypt`/`aes_decrypt` function " +
         "is invalid: Detail message: " +
         "Given final block not properly padded. " +
         "Such issues can arise if a bad key is used during decryption.")
@@ -118,7 +118,7 @@ class QueryExecutionErrorsSuite extends QueryTest
       assert(e.getErrorClass === "UNSUPPORTED_FEATURE")
       assert(e.getSqlState === "0A000")
       assert(e.getMessage.matches("""The feature is not supported: AES-\w+ with the padding \w+""" +
-        " by the aes_encrypt/aes_decrypt function."))
+        " by the `aes_encrypt`/`aes_decrypt` function."))
     }
 
     // Unsupported AES mode and padding in encrypt
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryParsingErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryParsingErrorsSuite.scala
index fe92eab20f7..29ed18071a4 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryParsingErrorsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryParsingErrorsSuite.scala
@@ -132,7 +132,7 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
       sqlState = "42000",
       message =
         """
-          |Invalid SQL syntax: The definition of window 'win' is repetitive.(line 1, pos 31)
+          |Invalid SQL syntax: The definition of window `win` is repetitive.(line 1, pos 31)
           |
           |== SQL ==
           |SELECT min(a) OVER win FROM t1 WINDOW win AS win, win AS win2
@@ -147,7 +147,7 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
       sqlState = "42000",
       message =
         """
-          |Invalid SQL syntax: Window reference 'win' is not a window specification.(line 1, pos 31)
+          |Invalid SQL syntax: Window reference `win` is not a window specification.(line 1, pos 31)
           |
           |== SQL ==
           |SELECT min(a) OVER win FROM t1 WINDOW win AS win
@@ -162,7 +162,7 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
       sqlState = "42000",
       message =
         """
-          |Invalid SQL syntax: Cannot resolve window reference 'win2'.(line 1, pos 31)
+          |Invalid SQL syntax: Cannot resolve window reference `win2`.(line 1, pos 31)
           |
           |== SQL ==
           |SELECT min(a) OVER win FROM t1 WINDOW win AS win2
@@ -211,7 +211,7 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
       sqlState = "42000",
       message =
         """
-          |Invalid SQL syntax: Too many arguments for transform years(line 1, pos 44)
+          |Invalid SQL syntax: Too many arguments for transform `years`(line 1, pos 44)
           |
           |== SQL ==
           |CREATE TABLE table(col int) PARTITIONED BY (years(col,col))
@@ -226,7 +226,7 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
       sqlState = "42000",
       message =
         """
-          |Invalid SQL syntax: Unsupported function name 'ns.db.func'(line 1, pos 14)
+          |Invalid SQL syntax: Unsupported function name `ns`.`db`.`func`(line 1, pos 14)
           |
           |== SQL ==
           |SELECT * FROM ns.db.func()
@@ -241,7 +241,7 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
       sqlState = "42000",
       message =
         """
-          |Invalid SQL syntax: SHOW sys FUNCTIONS not supported(line 1, pos 5)
+          |Invalid SQL syntax: SHOW `sys` FUNCTIONS not supported(line 1, pos 5)
           |
           |== SQL ==
           |SHOW sys FUNCTIONS
@@ -249,23 +249,34 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
           |""".stripMargin)
   }
 
+  // scalastyle:off line.size.limit
   test("INVALID_SQL_SYNTAX: Invalid pattern in show functions") {
-    val errorDesc =
-      "Invalid pattern in SHOW FUNCTIONS: f1. It must be a string literal.(line 1, pos 21)"
-
     validateParsingError(
       sqlText = "SHOW FUNCTIONS IN db f1",
       errorClass = "INVALID_SQL_SYNTAX",
       sqlState = "42000",
       message =
         s"""
-          |Invalid SQL syntax: $errorDesc
+          |Invalid SQL syntax: Invalid pattern in SHOW FUNCTIONS: `f1`. It must be a string literal.(line 1, pos 21)
           |
           |== SQL ==
           |SHOW FUNCTIONS IN db f1
           |---------------------^^^
           |""".stripMargin)
+    validateParsingError(
+      sqlText = "SHOW FUNCTIONS IN db LIKE f1",
+      errorClass = "INVALID_SQL_SYNTAX",
+      sqlState = "42000",
+      message =
+        s"""
+           |Invalid SQL syntax: Invalid pattern in SHOW FUNCTIONS: `f1`. It must be a string literal.(line 1, pos 26)
+           |
+           |== SQL ==
+           |SHOW FUNCTIONS IN db LIKE f1
+           |--------------------------^^^
+           |""".stripMargin)
   }
+  // scalastyle:on line.size.limit
 
   test("INVALID_SQL_SYNTAX: Create function with both if not exists and replace") {
     val sqlText =
@@ -335,7 +346,7 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
       sqlState = "42000",
       message =
         """
-          |Invalid SQL syntax: Unsupported function name 'ns.db.func'(line 2, pos 0)
+          |Invalid SQL syntax: Unsupported function name `ns`.`db`.`func`(line 2, pos 0)
           |
           |== SQL ==
           |
@@ -354,7 +365,7 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
         |JAR '/path/to/jar2'
         |""".stripMargin
     val errorDesc =
-      "Specifying a database in CREATE TEMPORARY FUNCTION is not allowed: 'db'(line 2, pos 0)"
+      "Specifying a database in CREATE TEMPORARY FUNCTION is not allowed: `db`(line 2, pos 0)"
 
     validateParsingError(
       sqlText = sqlText,
@@ -375,7 +386,7 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
 
   test("INVALID_SQL_SYNTAX: Drop temporary function requires a single part name") {
     val errorDesc =
-      "DROP TEMPORARY FUNCTION requires a single part name but got: db.func(line 1, pos 0)"
+      "DROP TEMPORARY FUNCTION requires a single part name but got: `db`.`func`(line 1, pos 0)"
 
     validateParsingError(
       sqlText = "DROP TEMPORARY FUNCTION db.func",
@@ -398,11 +409,26 @@ class QueryParsingErrorsSuite extends QueryTest with SharedSparkSession {
       sqlState = "23000",
       message =
         """
-          |Found duplicate keys 'p1'(line 1, pos 29)
+          |Found duplicate keys `p1`(line 1, pos 29)
           |
           |== SQL ==
           |INSERT OVERWRITE TABLE table PARTITION(p1='1', p1='1') SELECT 'col1', 'col2'
           |-----------------------------^^^
           |""".stripMargin)
   }
+
+  test("DUPLICATE_KEY: in table properties") {
+    validateParsingError(
+      sqlText = "ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('key1' = '1', 'key1' = '2')",
+      errorClass = "DUPLICATE_KEY",
+      sqlState = "23000",
+      message =
+        """
+          |Found duplicate keys `key1`(line 1, pos 39)
+          |
+          |== SQL ==
+          |ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('key1' = '1', 'key1' = '2')
+          |---------------------------------------^^^
+          |""".stripMargin)
+  }
 }
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala
index 25802e554d6..05378e32296 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLParserSuite.scala
@@ -200,21 +200,6 @@ class DDLParserSuite extends AnalysisTest with SharedSparkSession {
     assert(parsed.isInstanceOf[Project])
   }
 
-  test("duplicate keys in table properties") {
-    val e = intercept[ParseException] {
-      parser.parsePlan("ALTER TABLE dbx.tab1 SET TBLPROPERTIES ('key1' = '1', 'key1' = '2')")
-    }.getMessage
-    assert(e.contains("Found duplicate keys 'key1'"))
-  }
-
-  test("duplicate columns in partition specs") {
-    val e = intercept[ParseException] {
-      parser.parsePlan(
-        "ALTER TABLE dbx.tab1 PARTITION (a='1', a='2') RENAME TO PARTITION (a='100', a='200')")
-    }.getMessage
-    assert(e.contains("Found duplicate keys 'a'"))
-  }
-
   test("unsupported operations") {
     intercept[ParseException] {
       parser.parsePlan(

