This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new a8893422752 [SPARK-43867][SQL] Improve suggested candidates for unresolved attribute
a8893422752 is described below

commit a88934227523334550e451e437ce013772001079
Author: Max Gekk <[email protected]>
AuthorDate: Wed May 31 21:04:44 2023 +0300

    [SPARK-43867][SQL] Improve suggested candidates for unresolved attribute
    
    ### What changes were proposed in this pull request?
    In the PR, I propose to change the approach of stripping the common part of candidate qualifiers in `StringUtils.orderSuggestedIdentifiersBySimilarity`:
    1. If all candidates have the same qualifier, including namespace and table name, drop it. It should be dropped only if the base string (the unresolved attribute) doesn't include a namespace and table name. For example:
        - `[ns1.table1.col1, ns1.table1.col2] -> [col1, col2]` for unresolved attribute `col0`
        - `[ns1.table1.col1, ns1.table1.col2] -> [table1.col1, table1.col2]` for unresolved attribute `table1.col0`
    2. If all candidates belong to the same namespace, drop the namespace. It should be dropped for any unresolved attribute that isn't fully qualified. For example:
        - `[ns1.table1.col1, ns1.table2.col2] -> [table1.col1, table2.col2]` for unresolved attribute `col0` or `table0.col0`
        - `[ns1.table1.col1, ns1.table1.col2] -> [ns1.table1.col1, ns1.table1.col2]` for unresolved attribute `ns0.table0.col0`
    3. Otherwise, take the suggested candidates as is.
    4. Sort the candidate list using the Levenshtein distance.
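    
    The following minimal sketch of rules 1-3 (plain Scala, no Spark dependencies; `stripCommonQualifier` and the sample identifiers are illustrative names, not the actual Spark API) reproduces the examples above; the Levenshtein sort of rule 4 and backtick quoting are omitted:
    ```scala
    // Illustrative sketch of the stripping rules; not the code added by this PR.
    object SuggestionSketch {
      // Each candidate is a multi-part identifier, e.g. Seq("ns1", "table1", "col1").
      def stripCommonQualifier(
          baseParts: Seq[String],
          candidates: Seq[Seq[String]]): Seq[Seq[String]] = {
        if (baseParts.size == 1 && candidates.map(_.dropRight(1)).distinct.size == 1) {
          // Rule 1: single-part base and a common namespace + table name => keep the column only.
          candidates.map(_.takeRight(1))
        } else if (baseParts.size <= 2 && candidates.map(_.dropRight(2)).distinct.size == 1) {
          // Rule 2: base isn't fully qualified and the namespace is common => keep table.column.
          candidates.map(_.takeRight(2))
        } else {
          // Rule 3: qualifiers differ or the base is fully qualified => keep candidates as is.
          candidates
        }
      }

      def main(args: Array[String]): Unit = {
        val cands = Seq(Seq("ns1", "table1", "col1"), Seq("ns1", "table1", "col2"))
        println(stripCommonQualifier(Seq("col0"), cands).map(_.mkString(".")))
        // List(col1, col2)
        println(stripCommonQualifier(Seq("table1", "col0"), cands).map(_.mkString(".")))
        // List(table1.col1, table1.col2)
        println(stripCommonQualifier(Seq("ns0", "table0", "col0"), cands).map(_.mkString(".")))
        // List(ns1.table1.col1, ns1.table1.col2)
      }
    }
    ```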
    
    ### Why are the changes needed?
    This should improve user experience with Spark SQL by simplifying the error message about an unresolved attribute.
    
    ### Does this PR introduce _any_ user-facing change?
    Yes, it changes the error message.
    
    ### How was this patch tested?
    By running the existing test suites:
    ```
    $ build/sbt "test:testOnly *AnalysisErrorSuite"
    $ build/sbt "test:testOnly *QueryCompilationErrorsSuite"
    $ PYSPARK_PYTHON=python3 build/sbt "sql/testOnly org.apache.spark.sql.SQLQueryTestSuite"
    $ build/sbt "test:testOnly *DatasetUnpivotSuite"
    $ build/sbt "test:testOnly *DatasetSuite"
    ```
    
    Closes #41368 from MaxGekk/fix-suggested-column-list.
    
    Authored-by: Max Gekk <[email protected]>
    Signed-off-by: Max Gekk <[email protected]>
---
 .../sql/catalyst/analysis/CheckAnalysis.scala      |  3 +-
 .../plans/logical/basicLogicalOperators.scala      |  2 +-
 .../spark/sql/catalyst/util/StringUtils.scala      | 46 +++++++++-------------
 .../sql/catalyst/analysis/AnalysisErrorSuite.scala |  4 +-
 .../spark/sql/catalyst/util/StringUtilsSuite.scala |  5 ++-
 .../columnresolution-negative.sql.out              |  2 +-
 .../analyzer-results/group-by-all.sql.out          |  2 +-
 .../analyzer-results/join-lateral.sql.out          |  2 +-
 .../postgreSQL/aggregates_part1.sql.out            |  2 +-
 .../analyzer-results/postgreSQL/join.sql.out       |  6 +--
 .../udf/postgreSQL/udf-aggregates_part1.sql.out    |  2 +-
 .../udf/postgreSQL/udf-join.sql.out                |  6 +--
 .../results/columnresolution-negative.sql.out      |  2 +-
 .../sql-tests/results/group-by-all.sql.out         |  2 +-
 .../sql-tests/results/join-lateral.sql.out         |  2 +-
 .../results/postgreSQL/aggregates_part1.sql.out    |  2 +-
 .../sql-tests/results/postgreSQL/join.sql.out      |  6 +--
 .../udf/postgreSQL/udf-aggregates_part1.sql.out    |  2 +-
 .../results/udf/postgreSQL/udf-join.sql.out        |  6 +--
 .../org/apache/spark/sql/DatasetUnpivotSuite.scala |  2 +-
 .../spark/sql/connector/DataSourceV2SQLSuite.scala |  4 +-
 .../sql/errors/QueryCompilationErrorsSuite.scala   |  3 +-
 22 files changed, 53 insertions(+), 60 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
index c46dff1c4bf..594c0b666e8 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/CheckAnalysis.scala
@@ -139,7 +139,8 @@ trait CheckAnalysis extends PredicateHelper with LookupCatalog with QueryErrorsB
       a: Attribute,
       errorClass: String): Nothing = {
     val missingCol = a.sql
-    val candidates = operator.inputSet.toSeq.map(_.qualifiedName)
+    val candidates = operator.inputSet.toSeq
+      .map(attr => attr.qualifier :+ attr.name)
     val orderedCandidates =
       StringUtils.orderSuggestedIdentifiersBySimilarity(missingCol, candidates)
     throw QueryCompilationErrors.unresolvedAttributeError(
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
index 4bde26a7d6e..a90510fe681 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala
@@ -185,7 +185,7 @@ object Project {
           createNewColumn(columnExpr, f.name, f.metadata, Metadata.empty)
         } else {
           if (columnPath.isEmpty) {
-            val candidates = fields.map(_._1)
+            val candidates = fields.map(field => Seq(field._1))
             val orderedCandidates =
               StringUtils.orderSuggestedIdentifiersBySimilarity(f.name, candidates)
             throw QueryCompilationErrors.unresolvedColumnError(f.name, orderedCandidates)
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringUtils.scala
index e8280fb4430..da912c19393 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringUtils.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/util/StringUtils.scala
@@ -82,36 +82,26 @@ object StringUtils extends Logging {
 
   private[spark] def orderSuggestedIdentifiersBySimilarity(
       baseString: String,
-      testStrings: Seq[String]): Seq[String] = {
-    // This method is used to generate suggested list of candidates closest to `baseString` from the
-    // list of `testStrings`. Spark uses it to clarify error message in case a query refers to non
-    // existent column or attribute. The `baseString` could be single part or multi part and this
-    // method will try to match suggestions.
-    // Note that identifiers from `testStrings` could represent columns or attributes from different
-    // catalogs, schemas or tables. We preserve suggested identifier prefix and reconstruct
-    // multi-part identifier after ordering if there are more than one unique prefix in a list. This
-    // will also reconstruct multi-part identifier for the cases of nested columns. E.g. for a
-    // table `t` with columns `a`, `b`, `c.d` (nested) and requested column `d` we will create
-    // prefixes `t`, `t`, and `t.c`. Since there is more than one distinct prefix we will return
-    // sorted suggestions as multi-part identifiers => (`t`.`c`.`d`, `t`.`a`, `t`.`b`).
-    val multiPart = UnresolvedAttribute.parseAttributeName(baseString).size > 1
-    if (multiPart) {
-      testStrings.sortBy(LevenshteinDistance.getDefaultInstance.apply(_, baseString))
-    } else {
-      val split = testStrings.map { ident =>
-        val parts = UnresolvedAttribute.parseAttributeName(ident).map(quoteIfNeeded)
-        (parts.init.mkString("."), parts.last)
-      }
-      val sorted =
-        split.sortBy(pair => LevenshteinDistance.getDefaultInstance.apply(pair._2, baseString))
-      if (sorted.map(_._1).toSet.size == 1) {
-        // All identifier belong to the same relation
-        sorted.map(_._2)
+      candidates: Seq[Seq[String]]): Seq[String] = {
+    val baseParts = UnresolvedAttribute.parseAttributeName(baseString)
+    val strippedCandidates =
+      // Group by the qualifier. If all identifiers have the same qualifier, strip it.
+      // For example: Seq(`abc`.`def`.`t1`, `abc`.`def`.`t2`) => Seq(`t1`, `t2`)
+      if (baseParts.size == 1 && candidates.groupBy(_.dropRight(1)).size == 1) {
+        candidates.map(_.takeRight(1))
+      // Group by the qualifier excluding table name. If all identifiers have the same prefix
+      // (namespace) excluding table names, strip this prefix.
+      // For example: Seq(`abc`.`def`.`t1`, `abc`.`xyz`.`t2`) => Seq(`def`.`t1`, `xyz`.`t2`)
+      } else if (baseParts.size <= 2 && candidates.groupBy(_.dropRight(2)).size == 1) {
+        candidates.map(_.takeRight(2))
       } else {
-        // More than one relation
-        sorted.map(x => if (x._1.isEmpty) s"${x._2}" else s"${x._1}.${x._2}")
+        // Some candidates have different qualifiers
+        candidates
       }
-    }
+
+    strippedCandidates
+      .map(quoteNameParts)
+      .sortBy(LevenshteinDistance.getDefaultInstance.apply(_, baseString))
   }
 
   // scalastyle:off caselocale
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
index 37977e38ba8..5f6256881a4 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/analysis/AnalysisErrorSuite.scala
@@ -464,7 +464,7 @@ class AnalysisErrorSuite extends AnalysisTest {
     // When parse SQL string, we will wrap aggregate expressions with UnresolvedAlias.
     testRelation2.where($"bad_column" > 1).groupBy($"a")(UnresolvedAlias(max($"b"))),
     "UNRESOLVED_COLUMN.WITH_SUGGESTION",
-    Map("objectName" -> "`bad_column`", "proposal" -> "`a`, `b`, `c`, `d`, `e`"))
+    Map("objectName" -> "`bad_column`", "proposal" -> "`a`, `c`, `d`, `b`, `e`"))
 
   errorClassTest(
     "slide duration greater than window in time window",
@@ -1048,7 +1048,7 @@ class AnalysisErrorSuite extends AnalysisTest {
     testRelation2.where($"bad_column" > 1).groupBy($"a")(UnresolvedAlias(max($"b"))),
     "[UNRESOLVED_COLUMN.WITH_SUGGESTION] A column or function parameter with name " +
       "`bad_column` cannot be resolved. Did you mean one of the following? " +
-      "[`a`, `b`, `c`, `d`, `e`]"
+      "[`a`, `c`, `d`, `b`, `e`]"
       :: Nil)
 
   errorClassTest(
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/StringUtilsSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/StringUtilsSuite.scala
index 8f26bd19a64..e3a46d27692 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/StringUtilsSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/StringUtilsSuite.scala
@@ -141,8 +141,9 @@ class StringUtilsSuite extends SparkFunSuite with SQLHelper {
 
   test("SPARK-43841: mix of multipart and single-part identifiers") {
     val baseString = "b"
-    val testStrings = Seq("c1", "v1.c2", "v2.c2") // mix of multipart and single-part
-    val expectedOutput = Seq("c1", "v1.c2", "v2.c2")
+    // mix of multipart and single-part
+    val testStrings = Seq(Seq("c1"), Seq("v1", "c2"), Seq("v2.c2"))
+    val expectedOutput = Seq("`c1`", "`v2.c2`", "`v1`.`c2`")
     assert(orderSuggestedIdentifiersBySimilarity(baseString, testStrings) === expectedOutput)
   }
 }
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/columnresolution-negative.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/columnresolution-negative.sql.out
index 066dd3c4924..c7ce2190199 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/columnresolution-negative.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/columnresolution-negative.sql.out
@@ -295,7 +295,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`mydb1`.`t1`",
-    "proposal" : "`spark_catalog`.`mydb1`.`t1`.`i1`"
+    "proposal" : "`t1`.`i1`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/group-by-all.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/group-by-all.sql.out
index 602d41985e7..d193e97f79b 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/group-by-all.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/group-by-all.sql.out
@@ -338,7 +338,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`non_exist`",
-    "proposal" : "`name`, `power`, `city`, `country`, `id`"
+    "proposal" : "`id`, `name`, `power`, `city`, `country`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/join-lateral.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/join-lateral.sql.out
index 09218baebba..fe598f886ad 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/join-lateral.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/join-lateral.sql.out
@@ -742,7 +742,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`t1`.`c1`",
-    "proposal" : "`spark_catalog`.`default`.`t2`.`c1`, 
`spark_catalog`.`default`.`t2`.`c2`"
+    "proposal" : "`t2`.`c1`, `t2`.`c2`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/aggregates_part1.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/aggregates_part1.sql.out
index d582c722ac1..a17627aa8cb 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/aggregates_part1.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/aggregates_part1.sql.out
@@ -509,7 +509,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`o`.`unique1`",
-    "proposal" : "`i`.`unique1`, `i`.`unique2`, `i`.`hundred`, `i`.`even`, 
`i`.`four`"
+    "proposal" : "`i`.`unique1`, `i`.`unique2`, `i`.`even`, `i`.`four`, 
`i`.`odd`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/join.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/join.sql.out
index 02d141a9185..2e747f52a7d 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/join.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/postgreSQL/join.sql.out
@@ -3066,7 +3066,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`t1`.`uunique1`",
-    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`hundred`"
+    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`four`"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -3088,7 +3088,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`t2`.`uunique1`",
-    "proposal" : "`t2`.`unique1`, `t1`.`unique1`, `t2`.`unique2`, 
`t1`.`unique2`, `t2`.`hundred`"
+    "proposal" : "`t2`.`unique1`, `t1`.`unique1`, `t2`.`unique2`, 
`t1`.`unique2`, `t2`.`four`"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -3110,7 +3110,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`uunique1`",
-    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`hundred`"
+    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`ten`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/udf/postgreSQL/udf-aggregates_part1.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/udf/postgreSQL/udf-aggregates_part1.sql.out
index c2ca4056458..07fc99bb380 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/udf/postgreSQL/udf-aggregates_part1.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/udf/postgreSQL/udf-aggregates_part1.sql.out
@@ -500,7 +500,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`o`.`unique1`",
-    "proposal" : "`i`.`unique1`, `i`.`unique2`, `i`.`hundred`, `i`.`even`, 
`i`.`four`"
+    "proposal" : "`i`.`unique1`, `i`.`unique2`, `i`.`even`, `i`.`four`, 
`i`.`odd`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/analyzer-results/udf/postgreSQL/udf-join.sql.out b/sql/core/src/test/resources/sql-tests/analyzer-results/udf/postgreSQL/udf-join.sql.out
index f971f8bb738..239216f760b 100644
--- a/sql/core/src/test/resources/sql-tests/analyzer-results/udf/postgreSQL/udf-join.sql.out
+++ b/sql/core/src/test/resources/sql-tests/analyzer-results/udf/postgreSQL/udf-join.sql.out
@@ -3064,7 +3064,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`t1`.`uunique1`",
-    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`hundred`"
+    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`four`"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -3086,7 +3086,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`t2`.`uunique1`",
-    "proposal" : "`t2`.`unique1`, `t1`.`unique1`, `t2`.`unique2`, 
`t1`.`unique2`, `t2`.`hundred`"
+    "proposal" : "`t2`.`unique1`, `t1`.`unique1`, `t2`.`unique2`, 
`t1`.`unique2`, `t2`.`four`"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -3108,7 +3108,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`uunique1`",
-    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`hundred`"
+    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`ten`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out b/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out
index ac68cfa6db5..54397c8da08 100644
--- a/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/columnresolution-negative.sql.out
@@ -328,7 +328,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`mydb1`.`t1`",
-    "proposal" : "`spark_catalog`.`mydb1`.`t1`.`i1`"
+    "proposal" : "`t1`.`i1`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/group-by-all.sql.out b/sql/core/src/test/resources/sql-tests/results/group-by-all.sql.out
index d044af1e230..bf6394e31c8 100644
--- a/sql/core/src/test/resources/sql-tests/results/group-by-all.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/group-by-all.sql.out
@@ -278,7 +278,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`non_exist`",
-    "proposal" : "`name`, `power`, `city`, `country`, `id`"
+    "proposal" : "`id`, `name`, `power`, `city`, `country`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/join-lateral.sql.out b/sql/core/src/test/resources/sql-tests/results/join-lateral.sql.out
index bad4407a3f1..85933e3e732 100644
--- a/sql/core/src/test/resources/sql-tests/results/join-lateral.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/join-lateral.sql.out
@@ -544,7 +544,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`t1`.`c1`",
-    "proposal" : "`spark_catalog`.`default`.`t2`.`c1`, 
`spark_catalog`.`default`.`t2`.`c2`"
+    "proposal" : "`t2`.`c1`, `t2`.`c2`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part1.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part1.sql.out
index c6ea26abcf3..fcd31080918 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part1.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/aggregates_part1.sql.out
@@ -513,7 +513,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`o`.`unique1`",
-    "proposal" : "`i`.`unique1`, `i`.`unique2`, `i`.`hundred`, `i`.`even`, 
`i`.`four`"
+    "proposal" : "`i`.`unique1`, `i`.`unique2`, `i`.`even`, `i`.`four`, 
`i`.`odd`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/postgreSQL/join.sql.out b/sql/core/src/test/resources/sql-tests/results/postgreSQL/join.sql.out
index aa9f2dacdd7..44c993b73d7 100644
--- a/sql/core/src/test/resources/sql-tests/results/postgreSQL/join.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/postgreSQL/join.sql.out
@@ -3311,7 +3311,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`t1`.`uunique1`",
-    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`hundred`"
+    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`four`"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -3335,7 +3335,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`t2`.`uunique1`",
-    "proposal" : "`t2`.`unique1`, `t1`.`unique1`, `t2`.`unique2`, 
`t1`.`unique2`, `t2`.`hundred`"
+    "proposal" : "`t2`.`unique1`, `t1`.`unique1`, `t2`.`unique2`, 
`t1`.`unique2`, `t2`.`four`"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -3359,7 +3359,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`uunique1`",
-    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`hundred`"
+    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`ten`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part1.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part1.sql.out
index e1b9acf437c..8a9a2279171 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part1.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-aggregates_part1.sql.out
@@ -504,7 +504,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`o`.`unique1`",
-    "proposal" : "`i`.`unique1`, `i`.`unique2`, `i`.`hundred`, `i`.`even`, 
`i`.`four`"
+    "proposal" : "`i`.`unique1`, `i`.`unique2`, `i`.`even`, `i`.`four`, 
`i`.`odd`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-join.sql.out b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-join.sql.out
index e0c5a0d6d09..a757ae83d24 100644
--- a/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-join.sql.out
+++ b/sql/core/src/test/resources/sql-tests/results/udf/postgreSQL/udf-join.sql.out
@@ -3339,7 +3339,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`t1`.`uunique1`",
-    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`hundred`"
+    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`four`"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -3363,7 +3363,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`t2`.`uunique1`",
-    "proposal" : "`t2`.`unique1`, `t1`.`unique1`, `t2`.`unique2`, 
`t1`.`unique2`, `t2`.`hundred`"
+    "proposal" : "`t2`.`unique1`, `t1`.`unique1`, `t2`.`unique2`, 
`t1`.`unique2`, `t2`.`four`"
   },
   "queryContext" : [ {
     "objectType" : "",
@@ -3387,7 +3387,7 @@ org.apache.spark.sql.AnalysisException
   "sqlState" : "42703",
   "messageParameters" : {
     "objectName" : "`uunique1`",
-    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`hundred`"
+    "proposal" : "`t1`.`unique1`, `t2`.`unique1`, `t1`.`unique2`, 
`t2`.`unique2`, `t1`.`ten`"
   },
   "queryContext" : [ {
     "objectType" : "",
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/DatasetUnpivotSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/DatasetUnpivotSuite.scala
index 3c05a7415a1..4117ea63bdd 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/DatasetUnpivotSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/DatasetUnpivotSuite.scala
@@ -373,7 +373,7 @@ class DatasetUnpivotSuite extends QueryTest
       errorClass = "UNRESOLVED_COLUMN.WITH_SUGGESTION",
       parameters = Map(
         "objectName" -> "`1`",
-        "proposal" -> "`id`, `int1`, `str1`, `str2`, `long1`"))
+        "proposal" -> "`id`, `int1`, `str1`, `long1`, `str2`"))
 
     // unpivoting where value column does not exist
     val e2 = intercept[AnalysisException] {
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
index 7fdd049a977..d992a8aa08b 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/connector/DataSourceV2SQLSuite.scala
@@ -2065,7 +2065,7 @@ class DataSourceV2SQLSuiteV1Filter
         errorClass = "UNRESOLVED_COLUMN.WITH_SUGGESTION",
         parameters = Map(
           "objectName" -> "`dummy`",
-          "proposal" -> "`name`, `age`, `id`, `p`"
+          "proposal" -> "`age`, `id`, `name`, `p`"
         ),
         context = ExpectedContext(
           fragment = "dummy='abc'",
@@ -2076,7 +2076,7 @@ class DataSourceV2SQLSuiteV1Filter
         errorClass = "UNRESOLVED_COLUMN.WITH_SUGGESTION",
         parameters = Map(
           "objectName" -> "`dummy`",
-          "proposal" -> "`name`, `age`, `id`, `p`"
+          "proposal" -> "`age`, `id`, `name`, `p`"
         ),
         context = ExpectedContext(
           fragment = "dummy",
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
index 4973f6e93b6..1c6b8f3e25d 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryCompilationErrorsSuite.scala
@@ -428,7 +428,8 @@ class QueryCompilationErrorsSuite
       exception = intercept[AnalysisException] {sql(query)},
       errorClass = "UNRESOLVED_MAP_KEY.WITH_SUGGESTION",
       sqlState = None,
-      parameters = Map("objectName" -> "`a`",
+      parameters = Map(
+        "objectName" -> "`a`",
         "proposal" -> "`m`, `a.a`"),
       context = ExpectedContext(
         fragment = "a",

