zhengruifeng commented on code in PR #45846:
URL: https://github.com/apache/spark/pull/45846#discussion_r1549547958


##########
connector/connect/client/jvm/src/test/scala/org/apache/spark/sql/ClientE2ETestSuite.scala:
##########
@@ -938,11 +938,14 @@ class ClientE2ETestSuite extends RemoteSparkSession with 
SQLHelper with PrivateM
     }
     assert(e3.getMessage.contains("AMBIGUOUS_COLUMN_REFERENCE"))
 
-    val e4 = intercept[AnalysisException] {
-      // df1("i") is ambiguous as df1 appears in both join sides (df1_filter 
contains df1).
-      df1.join(df1_filter, df1("i") === 1).collect()
-    }
-    assert(e4.getMessage.contains("AMBIGUOUS_COLUMN_REFERENCE"))
+    //    val e4 = intercept[AnalysisException] {
+    //      // df1("i") is ambiguous as df1 appears in both join sides 
(df1_filter contains df1).
+    //      df1.join(df1_filter, df1("i") === 1).collect()
+    //    }
+    //    assert(e4.getMessage.contains("AMBIGUOUS_COLUMN_REFERENCE"))
+    //
+    //    "[AMBIGUOUS_COLUMN_OR_FIELD] Column or field `i` is ambiguous and 
has 2 matches.

Review Comment:
   ok, the reason is that the Spark Connect Scala client cannot `collect` 
duplicated column names:
   
   ```
   +---+---+---+---+
   |  i|  j|  i|  j|
   +---+---+---+---+
   |  1|  a|  1|  a|
   +---+---+---+---+
   ```
   
   ```
   [info] - SPARK-45509: ambiguous column reference *** FAILED *** (2 seconds, 
869 milliseconds)
   [info]   org.apache.spark.sql.AnalysisException: [AMBIGUOUS_COLUMN_OR_FIELD] 
Column or field `i` is ambiguous and has 2 matches. SQLSTATE: 42702
   [info]   at 
org.apache.spark.sql.errors.CompilationErrors.ambiguousColumnOrFieldError(CompilationErrors.scala:28)
   [info]   at 
org.apache.spark.sql.errors.CompilationErrors.ambiguousColumnOrFieldError$(CompilationErrors.scala:23)
   [info]   at 
org.apache.spark.sql.errors.CompilationErrors$.ambiguousColumnOrFieldError(CompilationErrors.scala:54)
   [info]   at 
org.apache.spark.sql.connect.client.arrow.ArrowDeserializers$.$anonfun$createFieldLookup$1(ArrowDeserializer.scala:460)
   [info]   at 
org.apache.spark.sql.connect.client.arrow.ArrowDeserializers$.$anonfun$createFieldLookup$1$adapted(ArrowDeserializer.scala:454)
   [info]   at scala.collection.immutable.List.foreach(List.scala:334)
   [info]   at 
org.apache.spark.sql.connect.client.arrow.ArrowDeserializers$.createFieldLookup(ArrowDeserializer.scala:454)
   [info]   at 
org.apache.spark.sql.connect.client.arrow.ArrowDeserializers$.deserializerFor(ArrowDeserializer.scala:328)
   [info]   at 
org.apache.spark.sql.connect.client.arrow.ArrowDeserializers$.deserializerFor(ArrowDeserializer.scala:86)
   [info]   at 
org.apache.spark.sql.connect.client.arrow.ArrowDeserializingIterator.<init>(ArrowDeserializer.scala:542)
   ```



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]

Reply via email to