This is an automated email from the ASF dual-hosted git repository.

wenchen pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 27d625d  [SPARK-27459][SQL] Revise the exception message of schema 
inference failure in file source V2
27d625d is described below

commit 27d625d785244ae78287e3a0eede44c79dfcbb92
Author: Gengliang Wang <gengliang.w...@databricks.com>
AuthorDate: Mon Apr 15 21:06:03 2019 +0800

    [SPARK-27459][SQL] Revise the exception message of schema inference failure 
in file source V2
    
    ## What changes were proposed in this pull request?
    
    Since 
https://github.com/apache/spark/pull/23383/files#diff-db4a140579c1ac4b1dbec7fe5057eecaR36,
 the exception message of schema inference failure in file source V2 is 
`tableName`, which is equivalent to `shortName + path`.
    
    While in file source V1, the message is `Unable to infer schema for ORC/CSV/JSON...`.
    We should make the message in V2 consistent with V1, so that the related test cases don't need to be modified in the future migration. See 
https://github.com/apache/spark/pull/24058#pullrequestreview-226364350
    
    ## How was this patch tested?
    
    Revert the modified unit test cases in 
https://github.com/apache/spark/pull/24005/files#diff-b9ddfbc9be8d83ecf100b3b8ff9610b9R431
 and 
https://github.com/apache/spark/pull/23383/files#diff-9ab56940ee5a53f2bb81e3c008653362R577,
 and test with them.
    
    Closes #24369 from gengliangwang/reviseInferSchemaMessage.
    
    Authored-by: Gengliang Wang <gengliang.w...@databricks.com>
    Signed-off-by: Wenchen Fan <wenc...@databricks.com>
---
 .../scala/org/apache/spark/sql/execution/datasources/v2/FileTable.scala | 2 +-
 .../org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala  | 2 +-
 .../scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala    | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileTable.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileTable.scala
index cb816d6..c0c57b8 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileTable.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/FileTable.scala
@@ -54,7 +54,7 @@ abstract class FileTable(
     inferSchema(fileIndex.allFiles())
   }.getOrElse {
     throw new AnalysisException(
-      s"Unable to infer schema for $name. It must be specified manually.")
+      s"Unable to infer schema for $formatName. It must be specified manually.")
   }.asNullable
 
   override lazy val schema: StructType = {
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
index fe40b9a..18ec3e3 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/orc/OrcQuerySuite.scala
@@ -580,7 +580,7 @@ abstract class OrcQueryTest extends OrcTest {
       val m1 = intercept[AnalysisException] {
         testAllCorruptFiles()
       }.getMessage
-      assert(m1.contains("Unable to infer schema"))
+      assert(m1.contains("Unable to infer schema for ORC"))
       testAllCorruptFilesWithoutSchemaInfer()
     }
 
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
index 2569085..9f96947 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala
@@ -428,7 +428,7 @@ class DataFrameReaderWriterSuite extends QueryTest with SharedSQLContext with Be
     val message = intercept[AnalysisException] {
       testRead(spark.read.csv(), Seq.empty, schema)
     }.getMessage
-    assert(message.toLowerCase(Locale.ROOT).contains("unable to infer schema for csv"))
+    assert(message.contains("Unable to infer schema for CSV. It must be specified manually."))
 
     testRead(spark.read.csv(dir), data, schema)
     testRead(spark.read.csv(dir, dir), data ++ data, schema)


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to