beliefer commented on a change in pull request #31757:
URL: https://github.com/apache/spark/pull/31757#discussion_r590977493



##########
File path: sql/catalyst/src/main/scala/org/apache/spark/sql/errors/QueryExecutionErrors.scala
##########
@@ -322,4 +323,237 @@ object QueryExecutionErrors {
   def compilerError(e: CompileException): Throwable = {
     new CompileException(failedToCompileMsg(e), e.getLocation)
   }
+
+  def dataPathNotSpecifiedError(): Throwable = {
+    new IllegalArgumentException("'path' is not specified")
+  }
+
+  def createStreamingSourceNotSpecifySchemaError(): Throwable = {
+    new IllegalArgumentException(
+      s"""
+         |Schema must be specified when creating a streaming source DataFrame. If some
+         |files already exist in the directory, then depending on the file format you
+         |may be able to create a static DataFrame on that directory with
+         |'spark.read.load(directory)' and infer schema from it.
+       """.stripMargin)
+  }
+
+  def streamedOperatorUnsupportedByDataSourceError(
+      className: String, operator: String): Throwable = {
+    new UnsupportedOperationException(
+      s"Data source $className does not support streamed $operator")
+  }
+
+  def allPathsNotExpectedExactlyOneError(allPaths: Seq[String]): Throwable = {
+    new IllegalArgumentException("Expected exactly one path to be specified, 
but " +
+      s"got: ${allPaths.mkString(", ")}")
+  }
+
+  def failedFindDataSourceError(provider: String, error: Throwable): Throwable = {
+    new ClassNotFoundException(
+      s"""
+         |Failed to find data source: $provider. Please find packages at
+         |http://spark.apache.org/third-party-projects.html
+       """.stripMargin, error)
+  }
+
+  def useSpark2RemovedClassesError(className: String, e: Throwable): Throwable = {
+    new ClassNotFoundException(s"$className was removed in Spark 2.0. " +
+      "Please check if your library is compatible with Spark 2.0", e)
+  }
+
+  def findIncompatibleDataSourceRegisterError(e: Throwable): Throwable = {
+    new ClassNotFoundException(
+      s"""
+         |Detected an incompatible DataSourceRegister. Please remove the incompatible
+         |library from classpath or upgrade it. Error: ${e.getMessage}
+       """.stripMargin, e)
+  }
+
+  def unrecognizedFileFormatError(format: String): Throwable = {
+    new IllegalStateException(s"unrecognized format $format")
+  }
+
+  def sparkUpgradeInReadError(
+      format: String, config: String, option: String): SparkUpgradeException = {
+    new SparkUpgradeException("3.0",
+      s"""
+         |reading dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z from $format
+         |files can be ambiguous, as the files may be written by Spark 2.x or legacy versions of
+         |Hive, which uses a legacy hybrid calendar that is different from Spark 3.0+'s Proleptic
+         |Gregorian calendar. See more details in SPARK-31404. You can set the SQL config
+         |'$config' or the datasource option '$option' to 'LEGACY' to rebase the datetime values
+         |w.r.t. the calendar difference during reading. To read the datetime values as it is,
+         |set the SQL config '$config' or the datasource option '$option' to 'CORRECTED'.
+       """.stripMargin, null)
+  }
+
+  def sparkUpgradeInWriteError(format: String, config: String): SparkUpgradeException = {
+    new SparkUpgradeException("3.0",
+      s"""
+         |writing dates before 1582-10-15 or timestamps before 1900-01-01T00:00:00Z into $format
+         |files can be dangerous, as the files may be read by Spark 2.x or legacy versions of Hive
+         |later, which uses a legacy hybrid calendar that is different from Spark 3.0+'s Proleptic
+         |Gregorian calendar. See more details in SPARK-31404. You can set $config to 'LEGACY' to
+         |rebase the datetime values w.r.t. the calendar difference during writing, to get maximum
+         |interoperability. Or set $config to 'CORRECTED' to write the datetime values as it is,
+         |if you are 100% sure that the written files will only be read by Spark 3.0+ or other
+         |systems that use Proleptic Gregorian calendar.
+       """.stripMargin, null)
+  }
+
+  def buildReaderUnsupportedForFileFormatError(format: String): Throwable = {
+    new UnsupportedOperationException(s"buildReader is not supported for 
$format")
+  }
+
+  def jobAbortedError(cause: Throwable): Throwable = {
+    new SparkException("Job aborted.", cause)
+  }
+
+  def taskFailedWhileWriteRowsError(cause: Throwable): Throwable = {
+    new SparkException("Task failed while writing rows.", cause)
+  }
+
+  def readCurrentFileNotFoundError(e: FileNotFoundException): Throwable = {
+    new FileNotFoundException(
+      s"""
+         |${e.getMessage}\n
+         |It is possible the underlying files have been updated. You can explicitly invalidate
+         |the cache in Spark by running 'REFRESH TABLE tableName' command in SQL or by
+         |recreating the Dataset/DataFrame involved.
+       """.stripMargin)
+  }
+
+  def unsupportedSaveModeError(saveMode: String, pathExists: Boolean): Throwable = {
+    new IllegalStateException(s"unsupported save mode $saveMode ($pathExists)")
+  }
+
+  def unableClearOutputDirectoryError(staticPrefixPath: Path): Throwable = {
+    new IOException(s"Unable to clear output directory $staticPrefixPath prior 
to writing to it")
+  }
+
+  def unableClearPartitionDirectoryError(path: Path): Throwable = {
+    new IOException(s"Unable to clear partition directory $path prior to 
writing to it")
+  }
+
+  def failedCastValueToDataTypeForPartitionColumnError(
+      value: String, dataType: DataType, columnName: String): Throwable = {
+    new RuntimeException(s"Failed to cast value `$value` to " +
+      s"`$dataType` for partition column `$columnName`")
+  }
+
+  def typeUnsupportedError(dataType: DataType): Throwable = {

Review comment:
       OK
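       For context, every helper in this hunk builds and returns a `Throwable` rather than throwing it, so call sites reduce to a one-line `throw QueryExecutionErrors.xxxError(...)` while all user-facing message text stays in one auditable file. A minimal sketch of a migrated call site, assuming Spark is on the classpath (the `PathResolutionExample` object and `resolvePath` are hypothetical, for illustration only; `dataPathNotSpecifiedError` is from this diff):

       ```scala
       import org.apache.spark.sql.errors.QueryExecutionErrors

       // Hypothetical call site showing the migration pattern this PR enables.
       object PathResolutionExample {
         def resolvePath(options: Map[String, String]): String = {
           // Before: throw new IllegalArgumentException("'path' is not specified")
           // After: the message lives in QueryExecutionErrors; the call site only throws.
           options.getOrElse("path", throw QueryExecutionErrors.dataPathNotSpecifiedError())
         }
       }
       ```

       Since a `throw` expression types as `Nothing`, it satisfies `getOrElse`'s expected result type, so the helper slots into expression position without extra ceremony.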

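       The two `SparkUpgradeException` messages carry the most user-facing guidance in this hunk. As a side note, here is a hedged sketch of what acting on `sparkUpgradeInReadError` looks like from the user side; the exact config key is whatever the caller passes as `$config` (the Parquet key below reflects Spark 3.1+ naming and is an assumption, as is the input path):

       ```scala
       import org.apache.spark.sql.SparkSession

       object RebaseReadExample {
         def main(args: Array[String]): Unit = {
           val spark = SparkSession.builder().master("local[*]").appName("rebase-demo").getOrCreate()
           // Assumed config key (Spark 3.1+ naming); the exception message itself
           // names the exact key to set via its $config placeholder.
           // 'CORRECTED' reads ancient dates/timestamps as-is under the Proleptic
           // Gregorian calendar; 'LEGACY' rebases them from the hybrid calendar.
           spark.conf.set("spark.sql.parquet.datetimeRebaseModeInRead", "CORRECTED")
           spark.read.parquet("/tmp/legacy-parquet").show() // hypothetical path
           spark.stop()
         }
       }
       ```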


