This is an automated email from the ASF dual-hosted git repository.

maxgekk pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new e6e242e0181 [SPARK-43839][SQL] Convert `_LEGACY_ERROR_TEMP_1337` to 
`UNSUPPORTED_FEATURE.TIME_TRAVEL`
e6e242e0181 is described below

commit e6e242e01813ddcc735f61a668059ed648a6cefb
Author: panbingkun <pbk1...@gmail.com>
AuthorDate: Sun May 28 21:15:24 2023 +0300

    [SPARK-43839][SQL] Convert `_LEGACY_ERROR_TEMP_1337` to 
`UNSUPPORTED_FEATURE.TIME_TRAVEL`
    
    ### What changes were proposed in this pull request?
    This PR aims to convert `_LEGACY_ERROR_TEMP_1337` to 
`UNSUPPORTED_FEATURE.TIME_TRAVEL` and remove `_LEGACY_ERROR_TEMP_1335`.
    
    ### Why are the changes needed?
    - The changes improve the error framework.
    - In the Spark codebase, `_LEGACY_ERROR_TEMP_1335` is no longer used 
anywhere.
    
    ### Does this PR introduce _any_ user-facing change?
    No.
    
    ### How was this patch tested?
    - Add new UT
    - Pass GA
    
    Closes #41349 from panbingkun/SPARK-43839.
    
    Authored-by: panbingkun <pbk1...@gmail.com>
    Signed-off-by: Max Gekk <max.g...@gmail.com>
---
 core/src/main/resources/error/error-classes.json            | 10 ----------
 .../apache/spark/sql/errors/QueryCompilationErrors.scala    |  6 ------
 .../sql/execution/datasources/v2/V2SessionCatalog.scala     |  6 ++++--
 .../apache/spark/sql/errors/QueryExecutionErrorsSuite.scala | 13 +++++++++++++
 4 files changed, 17 insertions(+), 18 deletions(-)

diff --git a/core/src/main/resources/error/error-classes.json 
b/core/src/main/resources/error/error-classes.json
index 36125d2cbae..f7c0879e1a2 100644
--- a/core/src/main/resources/error/error-classes.json
+++ b/core/src/main/resources/error/error-classes.json
@@ -4015,16 +4015,6 @@
       "Cannot specify both version and timestamp when time travelling the 
table."
     ]
   },
-  "_LEGACY_ERROR_TEMP_1335" : {
-    "message" : [
-      "<expr> is not a valid timestamp expression for time travel."
-    ]
-  },
-  "_LEGACY_ERROR_TEMP_1337" : {
-    "message" : [
-      "Table <tableName> does not support time travel."
-    ]
-  },
   "_LEGACY_ERROR_TEMP_1338" : {
     "message" : [
       "Sinks cannot request distribution and ordering in continuous execution 
mode."
diff --git 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
index 05b829838aa..45a9a03df4d 100644
--- 
a/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
+++ 
b/sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryCompilationErrors.scala
@@ -3152,12 +3152,6 @@ private[sql] object QueryCompilationErrors extends 
QueryErrorsBase {
       messageParameters = Map("relationId" -> relationId))
   }
 
-  def tableNotSupportTimeTravelError(tableName: Identifier): Throwable = {
-    new AnalysisException(
-      errorClass = "_LEGACY_ERROR_TEMP_1337",
-      messageParameters = Map("tableName" -> tableName.toString))
-  }
-
   def writeDistributionAndOrderingNotSupportedInContinuousExecution(): 
Throwable = {
     new AnalysisException(
       errorClass = "_LEGACY_ERROR_TEMP_1338",
diff --git 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalog.scala
 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalog.scala
index 437194b7b5b..8234fb5a0b1 100644
--- 
a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalog.scala
+++ 
b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalog.scala
@@ -89,10 +89,12 @@ class V2SessionCatalog(catalog: SessionCatalog)
           throw QueryCompilationErrors.timeTravelUnsupportedError(
             toSQLId(catalogTable.identifier.nameParts))
         } else {
-          throw QueryCompilationErrors.tableNotSupportTimeTravelError(ident)
+          throw QueryCompilationErrors.timeTravelUnsupportedError(
+            toSQLId(catalogTable.identifier.nameParts))
         }
 
-      case _ => throw 
QueryCompilationErrors.tableNotSupportTimeTravelError(ident)
+      case _ => throw QueryCompilationErrors.timeTravelUnsupportedError(
+        toSQLId(ident.asTableIdentifier.nameParts))
     }
   }
 
diff --git 
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
 
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
index 377596466db..4bcb1d115b7 100644
--- 
a/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
+++ 
b/sql/core/src/test/scala/org/apache/spark/sql/errors/QueryExecutionErrorsSuite.scala
@@ -886,6 +886,19 @@ class QueryExecutionErrorsSuite
       errorClass = "_LEGACY_ERROR_TEMP_2249",
       parameters = Map("maxBroadcastTableBytes" -> "1024.0 MiB", "dataSize" -> 
"2048.0 MiB"))
   }
+
+  test("V1 table don't support time travel") {
+    withTable("t") {
+      sql("CREATE TABLE t(c String) USING parquet")
+      checkError(
+        exception = intercept[AnalysisException] {
+          sql("SELECT * FROM t TIMESTAMP AS OF '2021-01-29 
00:00:00'").collect()
+        },
+        errorClass = "UNSUPPORTED_FEATURE.TIME_TRAVEL",
+        parameters = Map("relationId" -> "`spark_catalog`.`default`.`t`")
+      )
+    }
+  }
 }
 
 class FakeFileSystemSetPermission extends LocalFileSystem {


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org

Reply via email to