Repository: spark
Updated Branches:
  refs/heads/master 9e4928b7e -> 6ba17cd14


[SPARK-14414][SQL] Make DDL exceptions more consistent

## What changes were proposed in this pull request?

Just a bunch of small tweaks to make DDL exception messages more consistent: identifiers are quoted in catalog error messages, parser-level rejections go through the renamed `operationNotAllowed` helper so they all read `Operation not allowed: <command>`, and the unsupported `ALTER TABLE` column / file format variants move into the `unsupportedHiveNativeCommands` grammar rule instead of being parsed into placeholder commands.
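
For illustration, a minimal, self-contained sketch (plain Scala, no Spark dependency; the class and helper names only mirror the diff and are not the actual Spark code) of the two conventions the messages converge on, quoted identifiers and a shared "Operation not allowed" prefix for parser-level rejections:

```scala
// Illustrative sketch only: mirrors the message conventions in this patch.
class NoSuchTableExceptionSketch(db: String, table: String)
  extends Exception(s"Table or view '$table' not found in database '$db'")

object OperationNotAllowedSketch {
  // All parser-level rejections share this prefix, which is what the tests
  // (e.g. assertUnsupported in DDLCommandSuite) key on.
  def operationNotAllowed(message: String): Exception =
    new UnsupportedOperationException(s"Operation not allowed: $message")
}

object MessageDemo extends App {
  println(new NoSuchTableExceptionSketch("db2", "unknown_table").getMessage)
  println(OperationNotAllowedSketch.operationNotAllowed("DROP TABLE ... PURGE").getMessage)
}
```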

## How was this patch tested?

`DDLCommandSuite`, plus the other suites touched by the diff (`SessionCatalogSuite`, `CreateTableAsSelectSuite`, `HiveCompatibilitySuite`, `HiveCommandSuite`, `SQLQuerySuite`, `SQLViewSuite`).
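
Roughly, the negative tests now assert that unsupported DDL fails at parse time with that shared prefix. A hedged sketch of the check (assumes the spark-sql test classpath; the parser, config, and import paths are the ones appearing in the diff below, but this snippet itself is not part of the patch):

```scala
import org.apache.spark.sql.catalyst.parser.ParseException
import org.apache.spark.sql.execution.SparkSqlParser
import org.apache.spark.sql.internal.SQLConf

object UnsupportedDdlCheck extends App {
  val parser = new SparkSqlParser(new SQLConf)
  try {
    // Rejected by this patch: PURGE on DROP TABLE is routed to a ParseException.
    parser.parsePlan("DROP TABLE IF EXISTS tab PURGE")
    println("unexpectedly parsed")
  } catch {
    case e: ParseException =>
      assert(e.getMessage.toLowerCase.contains("operation not allowed"))
      println(e.getMessage)
  }
}
```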

Author: Andrew Or <and...@databricks.com>

Closes #12853 from andrewor14/make-exceptions-consistent.


Project: http://git-wip-us.apache.org/repos/asf/spark/repo
Commit: http://git-wip-us.apache.org/repos/asf/spark/commit/6ba17cd1
Tree: http://git-wip-us.apache.org/repos/asf/spark/tree/6ba17cd1
Diff: http://git-wip-us.apache.org/repos/asf/spark/diff/6ba17cd1

Branch: refs/heads/master
Commit: 6ba17cd147277a20a7fbb244c040e694de486c36
Parents: 9e4928b
Author: Andrew Or <and...@databricks.com>
Authored: Tue May 3 18:07:53 2016 -0700
Committer: Andrew Or <and...@databricks.com>
Committed: Tue May 3 18:07:53 2016 -0700

----------------------------------------------------------------------
 .../apache/spark/sql/catalyst/parser/SqlBase.g4 |  26 +--
 .../catalyst/analysis/NoSuchItemException.scala |  14 +-
 .../sql/catalyst/catalog/InMemoryCatalog.scala  |  10 +-
 .../sql/catalyst/catalog/SessionCatalog.scala   |  18 +-
 .../spark/sql/catalyst/parser/ParserUtils.scala |   2 +-
 .../catalyst/catalog/SessionCatalogSuite.scala  |   6 +-
 .../spark/sql/execution/SparkSqlParser.scala    | 152 +++-------------
 .../sql/execution/command/AnalyzeTable.scala    |   6 +-
 .../spark/sql/execution/command/ddl.scala       |  70 +-------
 .../spark/sql/execution/command/functions.scala |  10 +-
 .../spark/sql/execution/command/tables.scala    |  39 ++---
 .../spark/sql/execution/command/views.scala     |   3 +-
 .../sql/execution/command/DDLCommandSuite.scala | 174 +++----------------
 .../sql/sources/CreateTableAsSelectSuite.scala  |  13 +-
 .../hive/execution/HiveCompatibilitySuite.scala |  10 +-
 .../spark/sql/hive/HiveExternalCatalog.scala    |   2 +-
 .../spark/sql/hive/client/HiveClientImpl.scala  |  12 +-
 .../sql/hive/execution/HiveCommandSuite.scala   |   4 +-
 .../sql/hive/execution/SQLQuerySuite.scala      |   2 +-
 .../spark/sql/hive/execution/SQLViewSuite.scala |   3 +-
 20 files changed, 141 insertions(+), 435 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4 b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4
index 3ab448d..273ad92 100644
--- a/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4
+++ b/sql/catalyst/src/main/antlr4/org/apache/spark/sql/catalyst/parser/SqlBase.g4
@@ -81,18 +81,8 @@ statement
         DROP (IF EXISTS)? partitionSpec (',' partitionSpec)* PURGE?    
#dropTablePartitions
     | ALTER VIEW tableIdentifier
         DROP (IF EXISTS)? partitionSpec (',' partitionSpec)*           
#dropTablePartitions
-    | ALTER TABLE tableIdentifier partitionSpec?
-        SET FILEFORMAT fileFormat                                      
#setTableFileFormat
     | ALTER TABLE tableIdentifier partitionSpec? SET locationSpec      
#setTableLocation
-    | ALTER TABLE tableIdentifier partitionSpec?
-        CHANGE COLUMN? oldName=identifier colType
-        (FIRST | AFTER after=identifier)? (CASCADE | RESTRICT)?        
#changeColumn
-    | ALTER TABLE tableIdentifier partitionSpec?
-        ADD COLUMNS '(' colTypeList ')' (CASCADE | RESTRICT)?          
#addColumns
-    | ALTER TABLE tableIdentifier partitionSpec?
-        REPLACE COLUMNS '(' colTypeList ')' (CASCADE | RESTRICT)?      
#replaceColumns
-    | DROP TABLE (IF EXISTS)? tableIdentifier PURGE?
-        (FOR METADATA? REPLICATION '(' STRING ')')?                    
#dropTable
+    | DROP TABLE (IF EXISTS)? tableIdentifier PURGE?                   
#dropTable
     | DROP VIEW (IF EXISTS)? tableIdentifier                           
#dropTable
     | CREATE (OR REPLACE)? VIEW (IF NOT EXISTS)? tableIdentifier
         identifierCommentList? (COMMENT STRING)?
@@ -170,6 +160,10 @@ unsupportedHiveNativeCommands
     | kw1=ALTER kw2=TABLE tableIdentifier kw3=TOUCH
     | kw1=ALTER kw2=TABLE tableIdentifier partitionSpec? kw3=COMPACT
     | kw1=ALTER kw2=TABLE tableIdentifier partitionSpec? kw3=CONCATENATE
+    | kw1=ALTER kw2=TABLE tableIdentifier partitionSpec? kw3=SET kw4=FILEFORMAT
+    | kw1=ALTER kw2=TABLE tableIdentifier partitionSpec? kw3=ADD kw4=COLUMNS
+    | kw1=ALTER kw2=TABLE tableIdentifier partitionSpec? kw3=CHANGE 
kw4=COLUMNS?
+    | kw1=ALTER kw2=TABLE tableIdentifier partitionSpec? kw3=REPLACE 
kw4=COLUMNS
     | kw1=START kw2=TRANSACTION
     | kw1=COMMIT
     | kw1=ROLLBACK
@@ -647,9 +641,9 @@ nonReserved
     | NO | DATA
     | START | TRANSACTION | COMMIT | ROLLBACK
     | SORT | CLUSTER | DISTRIBUTE | UNSET | TBLPROPERTIES | SKEWED | STORED | 
DIRECTORIES | LOCATION
-    | EXCHANGE | ARCHIVE | UNARCHIVE | FILEFORMAT | TOUCH | COMPACT | 
CONCATENATE | CHANGE | FIRST
-    | AFTER | CASCADE | RESTRICT | BUCKETS | CLUSTERED | SORTED | PURGE | 
INPUTFORMAT | OUTPUTFORMAT
-    | DBPROPERTIES | DFS | TRUNCATE | METADATA | REPLICATION | COMPUTE
+    | EXCHANGE | ARCHIVE | UNARCHIVE | FILEFORMAT | TOUCH | COMPACT | 
CONCATENATE | CHANGE
+    | CASCADE | RESTRICT | BUCKETS | CLUSTERED | SORTED | PURGE | INPUTFORMAT 
| OUTPUTFORMAT
+    | DBPROPERTIES | DFS | TRUNCATE | COMPUTE
     | STATISTICS | ANALYZE | PARTITIONED | EXTERNAL | DEFINED | RECORDWRITER
     | REVOKE | GRANT | LOCK | UNLOCK | MSCK | REPAIR | EXPORT | IMPORT | LOAD 
| VALUES | COMMENT | ROLE
     | ROLES | COMPACTIONS | PRINCIPALS | TRANSACTIONS | INDEX | INDEXES | 
LOCKS | OPTION | LOCAL | INPATH
@@ -836,8 +830,6 @@ TOUCH: 'TOUCH';
 COMPACT: 'COMPACT';
 CONCATENATE: 'CONCATENATE';
 CHANGE: 'CHANGE';
-FIRST: 'FIRST';
-AFTER: 'AFTER';
 CASCADE: 'CASCADE';
 RESTRICT: 'RESTRICT';
 CLUSTERED: 'CLUSTERED';
@@ -849,8 +841,6 @@ DATABASE: 'DATABASE' | 'SCHEMA';
 DATABASES: 'DATABASES' | 'SCHEMAS';
 DFS: 'DFS';
 TRUNCATE: 'TRUNCATE';
-METADATA: 'METADATA';
-REPLICATION: 'REPLICATION';
 ANALYZE: 'ANALYZE';
 COMPUTE: 'COMPUTE';
 STATISTICS: 'STATISTICS';

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/NoSuchItemException.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/NoSuchItemException.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/NoSuchItemException.scala
index 11ef9e1..2412ec4 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/NoSuchItemException.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/analysis/NoSuchItemException.scala
@@ -25,13 +25,17 @@ import 
org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
  * Thrown by a catalog when an item cannot be found. The analyzer will rethrow 
the exception
  * as an [[org.apache.spark.sql.AnalysisException]] with the correct position 
information.
  */
-class NoSuchDatabaseException(db: String) extends AnalysisException(s"Database 
$db not found")
+class NoSuchDatabaseException(db: String) extends AnalysisException(s"Database 
'$db' not found")
 
 class NoSuchTableException(db: String, table: String)
-  extends AnalysisException(s"Table or View $table not found in database $db")
+  extends AnalysisException(s"Table or view '$table' not found in database 
'$db'")
 
-class NoSuchPartitionException(db: String, table: String, spec: 
TablePartitionSpec) extends
-    AnalysisException(s"Partition not found in table $table database $db:\n" + 
spec.mkString("\n"))
+class NoSuchPartitionException(
+    db: String,
+    table: String,
+    spec: TablePartitionSpec)
+  extends AnalysisException(
+    s"Partition not found in table '$table' database '$db':\n" + 
spec.mkString("\n"))
 
 class NoSuchFunctionException(db: String, func: String)
-  extends AnalysisException(s"Function $func not found in database $db")
+  extends AnalysisException(s"Function '$func' not found in database '$db'")

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
index 60eb732..1d2ca28 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
@@ -164,7 +164,7 @@ class InMemoryCatalog extends ExternalCatalog {
       catalog(db).tables.remove(table)
     } else {
       if (!ignoreIfNotExists) {
-        throw new AnalysisException(s"Table or View '$table' does not exist in 
database '$db'")
+        throw new AnalysisException(s"Table or view '$table' does not exist in 
database '$db'")
       }
     }
   }
@@ -211,7 +211,7 @@ class InMemoryCatalog extends ExternalCatalog {
       loadPath: String,
       isOverwrite: Boolean,
       holdDDLTime: Boolean): Unit = {
-    throw new AnalysisException("loadTable is not implemented for 
InMemoryCatalog.")
+    throw new UnsupportedOperationException("loadTable is not implemented")
   }
 
   override def loadPartition(
@@ -223,7 +223,7 @@ class InMemoryCatalog extends ExternalCatalog {
       holdDDLTime: Boolean,
       inheritTableSpecs: Boolean,
       isSkewedStoreAsSubdir: Boolean): Unit = {
-    throw new AnalysisException("loadPartition is not implemented for 
InMemoryCatalog.")
+    throw new UnsupportedOperationException("loadPartition is not 
implemented.")
   }
 
   // --------------------------------------------------------------------------
@@ -304,8 +304,8 @@ class InMemoryCatalog extends ExternalCatalog {
       partialSpec: Option[TablePartitionSpec] = None): 
Seq[CatalogTablePartition] = synchronized {
     requireTableExists(db, table)
     if (partialSpec.nonEmpty) {
-      throw new AnalysisException("listPartition does not support partition 
spec in " +
-        "InMemoryCatalog.")
+      throw new UnsupportedOperationException(
+        "listPartition with partial partition spec is not implemented")
     }
     catalog(db).tables(table).partitions.values.toSeq
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
index a445a25..ff63034 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
@@ -17,8 +17,6 @@
 
 package org.apache.spark.sql.catalyst.catalog
 
-import java.io.File
-
 import scala.collection.mutable
 
 import org.apache.hadoop.conf.Configuration
@@ -284,10 +282,12 @@ class SessionCatalog(
    * This assumes the database specified in `oldName` matches the one 
specified in `newName`.
    */
   def renameTable(oldName: TableIdentifier, newName: TableIdentifier): Unit = {
-    if (oldName.database != newName.database) {
-      throw new AnalysisException("rename does not support moving tables 
across databases")
-    }
     val db = oldName.database.getOrElse(currentDb)
+    val newDb = newName.database.getOrElse(currentDb)
+    if (db != newDb) {
+      throw new AnalysisException(
+        s"RENAME TABLE source and destination databases do not match: '$db' != 
'$newDb'")
+    }
     val oldTableName = formatTableName(oldName.table)
     val newTableName = formatTableName(newName.table)
     if (oldName.database.isDefined || !tempTables.contains(oldTableName)) {
@@ -315,7 +315,7 @@ class SessionCatalog(
       if (externalCatalog.tableExists(db, table)) {
         externalCatalog.dropTable(db, table, ignoreIfNotExists = true)
       } else if (!ignoreIfNotExists) {
-        logError(s"Table or View '${name.quotedString}' does not exist")
+        throw new AnalysisException(s"Table or view '${name.quotedString}' 
does not exist")
       }
     } else {
       tempTables.remove(table)
@@ -534,7 +534,7 @@ class SessionCatalog(
     if (!functionExists(identifier)) {
       externalCatalog.createFunction(db, newFuncDefinition)
     } else if (!ignoreIfExists) {
-      throw new AnalysisException(s"function '$identifier' already exists in 
database '$db'")
+      throw new AnalysisException(s"Function '$identifier' already exists in 
database '$db'")
     }
   }
 
@@ -632,9 +632,9 @@ class SessionCatalog(
   }
 
   protected def failFunctionLookup(name: String): Nothing = {
-    throw new AnalysisException(s"Undefined function: $name. This function is 
" +
+    throw new AnalysisException(s"Undefined function: '$name'. This function 
is " +
       s"neither a registered temporary function nor " +
-      s"a permanent function registered in the database $currentDb.")
+      s"a permanent function registered in the database '$currentDb'.")
   }
 
   /**

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
index 64713cd..58e2bdb 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/parser/ParserUtils.scala
@@ -39,7 +39,7 @@ object ParserUtils {
     stream.getText(Interval.of(0, stream.size()))
   }
 
-  def parseException(message: String, ctx: ParserRuleContext): ParseException 
= {
+  def operationNotAllowed(message: String, ctx: ParserRuleContext): 
ParseException = {
     new ParseException(s"Operation not allowed: $message", ctx)
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
----------------------------------------------------------------------
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
index ba5d8ce..a704ca7 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
@@ -233,9 +233,9 @@ class SessionCatalogSuite extends SparkFunSuite {
     intercept[AnalysisException] {
       catalog.dropTable(TableIdentifier("tbl1", Some("unknown_db")), 
ignoreIfNotExists = true)
     }
-    // If the table does not exist, we do not issue an exception. Instead, we 
output an error log
-    // message to console when ignoreIfNotExists is set to false.
-    catalog.dropTable(TableIdentifier("unknown_table", Some("db2")), 
ignoreIfNotExists = false)
+    intercept[AnalysisException] {
+      catalog.dropTable(TableIdentifier("unknown_table", Some("db2")), 
ignoreIfNotExists = false)
+    }
     catalog.dropTable(TableIdentifier("unknown_table", Some("db2")), 
ignoreIfNotExists = true)
   }
 

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
index dfc56a7..b000cc9 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/SparkSqlParser.scala
@@ -142,7 +142,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
   }
 
   /**
-   * A command for users to list the columm names for a table.
+   * A command for users to list the column names for a table.
    * This function creates a [[ShowColumnsCommand]] logical plan.
    *
    * The syntax of using this command in SQL is:
@@ -155,8 +155,10 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder 
{
 
     val lookupTable = Option(ctx.db) match {
       case None => table
-      case Some(db) if table.database.isDefined =>
-        throw new ParseException("Duplicates the declaration for database", 
ctx)
+      case Some(db) if table.database.exists(_ != db) =>
+        throw operationNotAllowed(
+          s"SHOW COLUMNS with conflicting databases: '$db' != 
'${table.database.get}'",
+          ctx)
       case Some(db) => TableIdentifier(table.identifier, Some(db.getText))
     }
     ShowColumnsCommand(lookupTable)
@@ -214,7 +216,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
   override def visitExplain(ctx: ExplainContext): LogicalPlan = 
withOrigin(ctx) {
     val options = ctx.explainOption.asScala
     if (options.exists(_.FORMATTED != null)) {
-      logWarning("Unsupported operation: EXPLAIN FORMATTED option")
+      throw operationNotAllowed("EXPLAIN FORMATTED", ctx)
     }
 
     // Create the explain comment.
@@ -260,9 +262,9 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
       ctx: CreateTableHeaderContext): TableHeader = withOrigin(ctx) {
     val temporary = ctx.TEMPORARY != null
     val ifNotExists = ctx.EXISTS != null
-    assert(!temporary || !ifNotExists,
-      "a CREATE TEMPORARY TABLE statement does not allow IF NOT EXISTS 
clause.",
-      ctx)
+    if (temporary && ifNotExists) {
+      throw operationNotAllowed("CREATE TEMPORARY TABLE ... IF NOT EXISTS", 
ctx)
+    }
     (visitTableIdentifier(ctx.tableIdentifier), temporary, ifNotExists, 
ctx.EXTERNAL != null)
   }
 
@@ -274,7 +276,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
   override def visitCreateTableUsing(ctx: CreateTableUsingContext): 
LogicalPlan = withOrigin(ctx) {
     val (table, temp, ifNotExists, external) = 
visitCreateTableHeader(ctx.createTableHeader)
     if (external) {
-      throw new ParseException("Unsupported operation: EXTERNAL option", ctx)
+      throw operationNotAllowed("CREATE EXTERNAL TABLE ... USING", ctx)
     }
     val options = 
Option(ctx.tablePropertyList).map(visitTablePropertyList).getOrElse(Map.empty)
     val provider = ctx.tableProvider.qualifiedName.getText
@@ -423,7 +425,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
         case "jar" | "file" | "archive" =>
           resourceType -> string(resource.STRING)
         case other =>
-          throw new ParseException(s"Resource Type '$resourceType' is not 
supported.", ctx)
+          throw operationNotAllowed(s"CREATE FUNCTION with resource type 
'$resourceType'", ctx)
       }
     }
 
@@ -459,10 +461,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder 
{
    */
   override def visitDropTable(ctx: DropTableContext): LogicalPlan = 
withOrigin(ctx) {
     if (ctx.PURGE != null) {
-      throw new ParseException("Unsupported operation: PURGE option", ctx)
-    }
-    if (ctx.REPLICATION != null) {
-      throw new ParseException("Unsupported operation: REPLICATION clause", 
ctx)
+      throw operationNotAllowed("DROP TABLE ... PURGE", ctx)
     }
     DropTable(
       visitTableIdentifier(ctx.tableIdentifier),
@@ -554,7 +553,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
   override def visitAddTablePartition(
       ctx: AddTablePartitionContext): LogicalPlan = withOrigin(ctx) {
     if (ctx.VIEW != null) {
-      throw new ParseException(s"Operation not allowed: partitioned views", 
ctx)
+      throw operationNotAllowed("ALTER VIEW ... ADD PARTITION", ctx)
     }
     // Create partition spec to location mapping.
     val specsAndLocs = if (ctx.partitionSpec.isEmpty) {
@@ -605,10 +604,10 @@ class SparkSqlAstBuilder(conf: SQLConf) extends 
AstBuilder {
   override def visitDropTablePartitions(
       ctx: DropTablePartitionsContext): LogicalPlan = withOrigin(ctx) {
     if (ctx.VIEW != null) {
-      throw new ParseException(s"Operation not allowed: partitioned views", 
ctx)
+      throw operationNotAllowed("ALTER VIEW ... DROP PARTITION", ctx)
     }
     if (ctx.PURGE != null) {
-      throw new ParseException(s"Operation not allowed: PURGE", ctx)
+      throw operationNotAllowed("ALTER TABLE ... DROP PARTITION ... PURGE", 
ctx)
     }
     AlterTableDropPartition(
       visitTableIdentifier(ctx.tableIdentifier),
@@ -617,35 +616,6 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder 
{
   }
 
   /**
-   * Create an [[AlterTableSetFileFormat]] command
-   *
-   * For example:
-   * {{{
-   *   ALTER TABLE table [PARTITION spec] SET FILEFORMAT file_format;
-   * }}}
-   */
-  override def visitSetTableFileFormat(
-      ctx: SetTableFileFormatContext): LogicalPlan = withOrigin(ctx) {
-    // AlterTableSetFileFormat currently takes both a GenericFileFormat and a
-    // TableFileFormatContext. This is a bit weird because it should only take 
one. It also should
-    // use a CatalogFileFormat instead of either a String or a Sequence of 
Strings. We will address
-    // this in a follow-up PR.
-    val (fileFormat, genericFormat) = ctx.fileFormat match {
-      case s: GenericFileFormatContext =>
-        (Seq.empty[String], Option(s.identifier.getText))
-      case s: TableFileFormatContext =>
-        val elements = Seq(s.inFmt, s.outFmt) ++ Option(s.serdeCls).toSeq
-        (elements.map(string), None)
-    }
-    AlterTableSetFileFormat(
-      visitTableIdentifier(ctx.tableIdentifier),
-      Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec),
-      fileFormat,
-      genericFormat)(
-      parseException("ALTER TABLE SET FILEFORMAT", ctx))
-  }
-
-  /**
    * Create an [[AlterTableSetLocation]] command
    *
    * For example:
@@ -661,79 +631,6 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder 
{
   }
 
   /**
-   * Create an [[AlterTableChangeCol]] command
-   *
-   * For example:
-   * {{{
-   *   ALTER TABLE tableIdentifier [PARTITION spec]
-   *    CHANGE [COLUMN] col_old_name col_new_name column_type [COMMENT 
col_comment]
-   *    [FIRST|AFTER column_name] [CASCADE|RESTRICT];
-   * }}}
-   */
-  override def visitChangeColumn(ctx: ChangeColumnContext): LogicalPlan = 
withOrigin(ctx) {
-    val col = visitColType(ctx.colType())
-    val comment = if (col.metadata.contains("comment")) {
-      Option(col.metadata.getString("comment"))
-    } else {
-      None
-    }
-
-    AlterTableChangeCol(
-      visitTableIdentifier(ctx.tableIdentifier),
-      Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec),
-      ctx.oldName.getText,
-      // We could also pass in a struct field - seems easier.
-      col.name,
-      col.dataType,
-      comment,
-      Option(ctx.after).map(_.getText),
-      // Note that Restrict and Cascade are mutually exclusive.
-      ctx.RESTRICT != null,
-      ctx.CASCADE != null)(
-      parseException("ALTER TABLE CHANGE COLUMN", ctx))
-  }
-
-  /**
-   * Create an [[AlterTableAddCol]] command
-   *
-   * For example:
-   * {{{
-   *   ALTER TABLE tableIdentifier [PARTITION spec]
-   *    ADD COLUMNS (name type [COMMENT comment], ...) [CASCADE|RESTRICT]
-   * }}}
-   */
-  override def visitAddColumns(ctx: AddColumnsContext): LogicalPlan = 
withOrigin(ctx) {
-    AlterTableAddCol(
-      visitTableIdentifier(ctx.tableIdentifier),
-      Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec),
-      createStructType(ctx.colTypeList),
-      // Note that Restrict and Cascade are mutually exclusive.
-      ctx.RESTRICT != null,
-      ctx.CASCADE != null)(
-      parseException("ALTER TABLE ADD COLUMNS", ctx))
-  }
-
-  /**
-   * Create an [[AlterTableReplaceCol]] command
-   *
-   * For example:
-   * {{{
-   *   ALTER TABLE tableIdentifier [PARTITION spec]
-   *    REPLACE COLUMNS (name type [COMMENT comment], ...) [CASCADE|RESTRICT]
-   * }}}
-   */
-  override def visitReplaceColumns(ctx: ReplaceColumnsContext): LogicalPlan = 
withOrigin(ctx) {
-    AlterTableReplaceCol(
-      visitTableIdentifier(ctx.tableIdentifier),
-      Option(ctx.partitionSpec).map(visitNonOptionalPartitionSpec),
-      createStructType(ctx.colTypeList),
-      // Note that Restrict and Cascade are mutually exclusive.
-      ctx.RESTRICT != null,
-      ctx.CASCADE != null)(
-      parseException("ALTER TABLE REPLACE COLUMNS", ctx))
-  }
-
-  /**
    * Create location string.
    */
   override def visitLocationSpec(ctx: LocationSpecContext): String = 
withOrigin(ctx) {
@@ -753,7 +650,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
         .map { orderedIdCtx =>
           Option(orderedIdCtx.ordering).map(_.getText).foreach { dir =>
             if (dir.toLowerCase != "asc") {
-              throw parseException("Only ASC ordering is supported for sorting 
columns", ctx)
+              throw operationNotAllowed(s"Column ordering must be ASC, was 
'$dir'", ctx)
             }
           }
 
@@ -789,7 +686,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
       // SET ROLE is the exception to the rule, because we handle this before 
other SET commands.
       "SET ROLE"
     }
-    throw parseException(keywords, ctx)
+    throw operationNotAllowed(keywords, ctx)
   }
 
   /**
@@ -799,7 +696,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
     ctx.identifier.getText.toLowerCase match {
       case "file" => AddFile(remainder(ctx.identifier).trim)
       case "jar" => AddJar(remainder(ctx.identifier).trim)
-      case other => throw new ParseException(s"Unsupported resource type 
'$other'.", ctx)
+      case other => throw operationNotAllowed(s"ADD with resource type 
'$other'", ctx)
     }
   }
 
@@ -836,10 +733,10 @@ class SparkSqlAstBuilder(conf: SQLConf) extends 
AstBuilder {
           "Please use registerTempTable as an alternative.", ctx)
     }
     if (ctx.skewSpec != null) {
-      throw new ParseException("Operation not allowed: CREATE TABLE ... SKEWED 
BY ...", ctx)
+      throw operationNotAllowed("CREATE TABLE ... SKEWED BY", ctx)
     }
     if (ctx.bucketSpec != null) {
-      throw new ParseException("Operation not allowed: CREATE TABLE ... 
CLUSTERED BY ...", ctx)
+      throw operationNotAllowed("CREATE TABLE ... CLUSTERED BY", ctx)
     }
     val tableType = if (external) {
       CatalogTableType.EXTERNAL
@@ -926,9 +823,9 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
       case (c: GenericFileFormatContext, null) =>
         visitGenericFileFormat(c)
       case (null, storageHandler) =>
-        throw new ParseException("Operation not allowed: ... STORED BY 
storage_handler ...", ctx)
+        throw operationNotAllowed("STORED BY", ctx)
       case _ =>
-        throw new ParseException("expected either STORED AS or STORED BY, not 
both", ctx)
+        throw new ParseException("Expected either STORED AS or STORED BY, not 
both", ctx)
     }
   }
 
@@ -960,7 +857,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder {
           outputFormat = s.outputFormat,
           serde = s.serde)
       case None =>
-        throw new ParseException(s"Unrecognized file format in STORED AS 
clause: $source", ctx)
+        throw operationNotAllowed(s"STORED AS with file format '$source'", ctx)
     }
   }
 
@@ -1041,7 +938,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends AstBuilder 
{
    */
   override def visitCreateView(ctx: CreateViewContext): LogicalPlan = 
withOrigin(ctx) {
     if (ctx.identifierList != null) {
-      throw new ParseException(s"Operation not allowed: partitioned views", 
ctx)
+      throw operationNotAllowed("CREATE VIEW ... PARTITIONED ON", ctx)
     } else {
       val identifiers = 
Option(ctx.identifierCommentList).toSeq.flatMap(_.identifierComment.asScala)
       val schema = identifiers.map { ic =>
@@ -1128,6 +1025,7 @@ class SparkSqlAstBuilder(conf: SQLConf) extends 
AstBuilder {
       recordReader: Token,
       schemaLess: Boolean): ScriptInputOutputSchema = {
     if (recordWriter != null || recordReader != null) {
+      // TODO: what does this message mean?
       throw new ParseException(
         "Unsupported operation: Used defined record reader/writer classes.", 
ctx)
     }

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTable.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTable.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTable.scala
index 54ff5ae..de2db44 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTable.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/AnalyzeTable.scala
@@ -21,7 +21,7 @@ import scala.util.control.NonFatal
 
 import org.apache.hadoop.fs.{FileSystem, Path}
 
-import org.apache.spark.sql.{Row, SparkSession}
+import org.apache.spark.sql.{AnalysisException, Row, SparkSession}
 import org.apache.spark.sql.catalyst.analysis.EliminateSubqueryAliases
 import org.apache.spark.sql.catalyst.catalog.{CatalogRelation, CatalogTable}
 
@@ -99,8 +99,8 @@ case class AnalyzeTable(tableName: String) extends 
RunnableCommand {
         }
 
       case otherRelation =>
-        throw new UnsupportedOperationException(
-          s"Analyze only works for Hive tables, but $tableName is a 
${otherRelation.nodeName}")
+        throw new AnalysisException(s"ANALYZE TABLE is only supported for Hive 
tables, " +
+          s"but '${tableIdent.unquotedString}' is a 
${otherRelation.nodeName}.")
     }
     Seq.empty[Row]
   }

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
index 4464711..aa06c01 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/ddl.scala
@@ -30,31 +30,10 @@ import org.apache.spark.sql.catalyst.parser.ParseException
 import org.apache.spark.sql.types._
 
 
-
 // Note: The definition of these commands are based on the ones described in
 // https://cwiki.apache.org/confluence/display/Hive/LanguageManual+DDL
 
 /**
- * A DDL command that is not supported right now. Since we have already 
implemented
- * the parsing rules for some commands that are not allowed, we use this as 
the base class
- * of those commands.
- */
-abstract class UnsupportedCommand(exception: ParseException) extends 
RunnableCommand {
-
-  // Throws the ParseException when we create this command.
-  throw exception
-
-  override def run(sparkSession: SparkSession): Seq[Row] = {
-    Seq.empty[Row]
-  }
-
-  override val output: Seq[Attribute] = {
-    Seq(AttributeReference("result", StringType, nullable = false)())
-  }
-
-}
-
-/**
  * A command for users to create a new database.
  *
  * It will issue an error message when the database with the same name already 
exists,
@@ -251,8 +230,8 @@ case class AlterTableSetProperties(
     val table = catalog.getTableMetadata(tableName)
     val newProperties = table.properties ++ properties
     if (DDLUtils.isDatasourceTable(newProperties)) {
-      throw new AnalysisException(
-        "alter table properties is not supported for tables defined using the 
datasource API")
+      throw new AnalysisException("ALTER TABLE SET TBLPROPERTIES is not 
supported for " +
+        "tables defined using the datasource API")
     }
     val newTable = table.copy(properties = newProperties)
     catalog.alterTable(newTable)
@@ -319,15 +298,14 @@ case class AlterTableSerDeProperties(
 
   // should never happen if we parsed things correctly
   require(serdeClassName.isDefined || serdeProperties.isDefined,
-    "alter table attempted to set neither serde class name nor serde 
properties")
+    "ALTER TABLE attempted to set neither serde class name nor serde 
properties")
 
   override def run(sparkSession: SparkSession): Seq[Row] = {
     val catalog = sparkSession.sessionState.catalog
     val table = catalog.getTableMetadata(tableName)
     // Do not support setting serde for datasource tables
     if (serdeClassName.isDefined && DDLUtils.isDatasourceTable(table)) {
-      throw new AnalysisException(
-        "alter table serde is not supported for datasource tables")
+      throw new AnalysisException("ALTER TABLE SET SERDE is not supported for 
datasource tables")
     }
     val newTable = table.withNewStorage(
       serde = serdeClassName.orElse(table.storage.serde),
@@ -361,7 +339,7 @@ case class AlterTableAddPartition(
     val table = catalog.getTableMetadata(tableName)
     if (DDLUtils.isDatasourceTable(table)) {
       throw new AnalysisException(
-        "alter table add partition is not allowed for tables defined using the 
datasource API")
+        "ALTER TABLE ADD PARTITION is not allowed for tables defined using the 
datasource API")
     }
     val parts = partitionSpecsAndLocs.map { case (spec, location) =>
       // inherit table storage format (possibly except for location)
@@ -420,7 +398,7 @@ case class AlterTableDropPartition(
     val table = catalog.getTableMetadata(tableName)
     if (DDLUtils.isDatasourceTable(table)) {
       throw new AnalysisException(
-        "alter table drop partition is not allowed for tables defined using 
the datasource API")
+        "ALTER TABLE DROP PARTITIONS is not allowed for tables defined using 
the datasource API")
     }
     catalog.dropPartitions(tableName, specs, ignoreIfNotExists = ifExists)
     Seq.empty[Row]
@@ -428,12 +406,6 @@ case class AlterTableDropPartition(
 
 }
 
-case class AlterTableSetFileFormat(
-    tableName: TableIdentifier,
-    partitionSpec: Option[TablePartitionSpec],
-    fileFormat: Seq[String],
-    genericFormat: Option[String])(exception: ParseException)
-  extends UnsupportedCommand(exception) with Logging
 
 /**
  * A command that sets the location of a table or a partition.
@@ -462,7 +434,7 @@ case class AlterTableSetLocation(
         val newPart =
           if (DDLUtils.isDatasourceTable(table)) {
             throw new AnalysisException(
-              "alter table set location for partition is not allowed for 
tables defined " +
+              "ALTER TABLE SET LOCATION for partition is not allowed for 
tables defined " +
               "using the datasource API")
           } else {
             part.copy(storage = part.storage.copy(locationUri = 
Some(location)))
@@ -485,34 +457,6 @@ case class AlterTableSetLocation(
 
 }
 
-case class AlterTableChangeCol(
-    tableName: TableIdentifier,
-    partitionSpec: Option[TablePartitionSpec],
-    oldColName: String,
-    newColName: String,
-    dataType: DataType,
-    comment: Option[String],
-    afterColName: Option[String],
-    restrict: Boolean,
-    cascade: Boolean)(exception: ParseException)
-  extends UnsupportedCommand(exception) with Logging
-
-case class AlterTableAddCol(
-    tableName: TableIdentifier,
-    partitionSpec: Option[TablePartitionSpec],
-    columns: StructType,
-    restrict: Boolean,
-    cascade: Boolean)(exception: ParseException)
-  extends UnsupportedCommand(exception) with Logging
-
-case class AlterTableReplaceCol(
-    tableName: TableIdentifier,
-    partitionSpec: Option[TablePartitionSpec],
-    columns: StructType,
-    restrict: Boolean,
-    cascade: Boolean)(exception: ParseException)
-  extends UnsupportedCommand(exception) with Logging
-
 
 private[sql] object DDLUtils {
 

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/core/src/main/scala/org/apache/spark/sql/execution/command/functions.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/functions.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/functions.scala
index 5aa779d..73c1ef7 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/functions.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/functions.scala
@@ -51,9 +51,8 @@ case class CreateFunction(
     val catalog = sparkSession.sessionState.catalog
     if (isTemp) {
       if (databaseName.isDefined) {
-        throw new AnalysisException(
-          s"It is not allowed to provide database name when defining a 
temporary function. " +
-            s"However, database name ${databaseName.get} is provided.")
+        throw new AnalysisException(s"Specifying a database in CREATE 
TEMPORARY FUNCTION " +
+          s"is not allowed: '${databaseName.get}'")
       }
       // We first load resources and then put the builder in the function 
registry.
       // Please note that it is allowed to overwrite an existing temp function.
@@ -153,9 +152,8 @@ case class DropFunction(
     val catalog = sparkSession.sessionState.catalog
     if (isTemp) {
       if (databaseName.isDefined) {
-        throw new AnalysisException(
-          s"It is not allowed to provide database name when dropping a 
temporary function. " +
-            s"However, database name ${databaseName.get} is provided.")
+        throw new AnalysisException(s"Specifying a database in DROP TEMPORARY 
FUNCTION " +
+          s"is not allowed: '${databaseName.get}'")
       }
       catalog.dropTempFunction(functionName, ifExists)
     } else {

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
index 6078918..489c980 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/tables.scala
@@ -162,37 +162,36 @@ case class LoadData(
   override def run(sparkSession: SparkSession): Seq[Row] = {
     val catalog = sparkSession.sessionState.catalog
     if (!catalog.tableExists(table)) {
-      throw new AnalysisException(
-        s"Table in LOAD DATA does not exist: '$table'")
+      throw new AnalysisException(s"Target table in LOAD DATA does not exist: 
'$table'")
     }
-
     val targetTable = catalog.getTableMetadataOption(table).getOrElse {
-      throw new AnalysisException(
-        s"Table in LOAD DATA cannot be temporary: '$table'")
+      throw new AnalysisException(s"Target table in LOAD DATA cannot be 
temporary: '$table'")
     }
-
     if (DDLUtils.isDatasourceTable(targetTable)) {
-      throw new AnalysisException(
-        "LOAD DATA is not supported for datasource tables")
+      throw new AnalysisException(s"LOAD DATA is not supported for datasource 
tables: '$table'")
     }
-
     if (targetTable.partitionColumnNames.nonEmpty) {
-      if (partition.isEmpty || targetTable.partitionColumnNames.size != 
partition.get.size) {
-        throw new AnalysisException(
-          "LOAD DATA to partitioned table must specify a specific partition of 
" +
-          "the table by specifying values for all of the partitioning 
columns.")
+      if (partition.isEmpty) {
+        throw new AnalysisException(s"LOAD DATA target table '$table' is 
partitioned, " +
+          s"but no partition spec is provided")
+      }
+      if (targetTable.partitionColumnNames.size != partition.get.size) {
+        throw new AnalysisException(s"LOAD DATA target table '$table' is 
partitioned, " +
+          s"but number of columns in provided partition spec 
(${partition.get.size}) " +
+          s"does not match number of partitioned columns in table " +
+          s"(${targetTable.partitionColumnNames.size})")
       }
-
       partition.get.keys.foreach { colName =>
         if (!targetTable.partitionColumnNames.contains(colName)) {
-          throw new AnalysisException(
-            s"LOAD DATA to partitioned table specifies a non-existing 
partition column: '$colName'")
+          throw new AnalysisException(s"LOAD DATA target table '$table' is 
partitioned, " +
+            s"but the specified partition spec refers to a column that is not 
partitioned: " +
+            s"'$colName'")
         }
       }
     } else {
       if (partition.nonEmpty) {
-        throw new AnalysisException(
-          "LOAD DATA to non-partitioned table cannot specify partition.")
+        throw new AnalysisException(s"LOAD DATA target table '$table' is not 
partitioned, " +
+          s"but a partition spec was provided.")
       }
     }
 
@@ -200,7 +199,7 @@ case class LoadData(
       if (isLocal) {
         val uri = Utils.resolveURI(path)
         if (!new File(uri.getPath()).exists()) {
-          throw new AnalysisException(s"LOAD DATA with non-existing path: 
$path")
+          throw new AnalysisException(s"LOAD DATA input path does not exist: 
$path")
         }
         uri
       } else {
@@ -231,7 +230,7 @@ case class LoadData(
 
           if (scheme == null) {
             throw new AnalysisException(
-              "LOAD DATA with non-local path must specify URI Scheme.")
+              s"LOAD DATA: URI scheme is required for non-local input paths: 
'$path'")
           }
 
           // Follow Hive's behavior:

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
index 1641780..0f656ef 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/command/views.scala
@@ -58,8 +58,7 @@ case class CreateViewCommand(
   private val tableIdentifier = tableDesc.identifier
 
   if (allowExisting && replace) {
-    throw new AnalysisException(
-      "It is not allowed to define a view with both IF NOT EXISTS and OR 
REPLACE.")
+    throw new AnalysisException("CREATE VIEW with both IF NOT EXISTS and 
REPLACE is not allowed.")
   }
 
   override def run(sparkSession: SparkSession): Seq[Row] = {

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala
index be0f4d7..bd428a0 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DDLCommandSuite.scala
@@ -23,17 +23,18 @@ import org.apache.spark.sql.catalyst.plans.PlanTest
 import org.apache.spark.sql.catalyst.plans.logical.Project
 import org.apache.spark.sql.execution.SparkSqlParser
 import org.apache.spark.sql.internal.SQLConf
-import org.apache.spark.sql.types._
+
 
 // TODO: merge this with DDLSuite (SPARK-14441)
 class DDLCommandSuite extends PlanTest {
   private val parser = new SparkSqlParser(new SQLConf)
 
-  private def assertUnsupported(sql: String): Unit = {
+  private def assertUnsupported(sql: String, containsThesePhrases: Seq[String] 
= Seq()): Unit = {
     val e = intercept[ParseException] {
       parser.parsePlan(sql)
     }
     assert(e.getMessage.toLowerCase.contains("operation not allowed"))
+    containsThesePhrases.foreach { p => 
assert(e.getMessage.toLowerCase.contains(p)) }
   }
 
   test("create database") {
@@ -347,27 +348,13 @@ class DDLCommandSuite extends PlanTest {
     comparePlans(parsed2, expected2)
   }
 
-  // ALTER VIEW view_name ADD [IF NOT EXISTS] PARTITION partition_spec 
PARTITION partition_spec ...;
-  test("alter view: add partition") {
-    val sql1 =
+  test("alter view: add partition (not supported)") {
+    assertUnsupported(
       """
         |ALTER VIEW view_name ADD IF NOT EXISTS PARTITION
         |(dt='2008-08-08', country='us') PARTITION
         |(dt='2009-09-09', country='uk')
-      """.stripMargin
-    // different constant types in partitioning spec
-    val sql2 =
-    """
-      |ALTER VIEW view_name ADD PARTITION
-      |(col1=NULL, cOL2='f', col3=5, COL4=true)
-    """.stripMargin
-
-    intercept[ParseException] {
-      parser.parsePlan(sql1)
-    }
-    intercept[ParseException] {
-      parser.parsePlan(sql2)
-    }
+      """.stripMargin)
   }
 
   test("alter table: rename partition") {
@@ -392,7 +379,7 @@ class DDLCommandSuite extends PlanTest {
       """.stripMargin)
   }
 
-  // ALTER TABLE table_name DROP [IF EXISTS] PARTITION spec1[, PARTITION 
spec2, ...] [PURGE]
+  // ALTER TABLE table_name DROP [IF EXISTS] PARTITION spec1[, PARTITION 
spec2, ...]
   // ALTER VIEW table_name DROP [IF EXISTS] PARTITION spec1[, PARTITION spec2, 
...]
   test("alter table/view: drop partitions") {
     val sql1_table =
@@ -403,24 +390,17 @@ class DDLCommandSuite extends PlanTest {
     val sql2_table =
       """
        |ALTER TABLE table_name DROP PARTITION
-       |(dt='2008-08-08', country='us'), PARTITION (dt='2009-09-09', 
country='uk') PURGE
+       |(dt='2008-08-08', country='us'), PARTITION (dt='2009-09-09', 
country='uk')
       """.stripMargin
     val sql1_view = sql1_table.replace("TABLE", "VIEW")
-    // Note: ALTER VIEW DROP PARTITION does not support PURGE
-    val sql2_view = sql2_table.replace("TABLE", "VIEW").replace("PURGE", "")
+    val sql2_view = sql2_table.replace("TABLE", "VIEW")
 
     val parsed1_table = parser.parsePlan(sql1_table)
-    val e = intercept[ParseException] {
-      parser.parsePlan(sql2_table)
-    }
-    assert(e.getMessage.contains("Operation not allowed"))
-
-    intercept[ParseException] {
-      parser.parsePlan(sql1_view)
-    }
-    intercept[ParseException] {
-      parser.parsePlan(sql2_view)
-    }
+    val parsed2_table = parser.parsePlan(sql2_table)
+    assertUnsupported(sql1_table + " PURGE")
+    assertUnsupported(sql2_table + " PURGE")
+    assertUnsupported(sql1_view)
+    assertUnsupported(sql2_view)
 
     val tableIdent = TableIdentifier("table_name", None)
     val expected1_table = AlterTableDropPartition(
@@ -429,8 +409,10 @@ class DDLCommandSuite extends PlanTest {
         Map("dt" -> "2008-08-08", "country" -> "us"),
         Map("dt" -> "2009-09-09", "country" -> "uk")),
       ifExists = true)
+    val expected2_table = expected1_table.copy(ifExists = false)
 
     comparePlans(parsed1_table, expected1_table)
+    comparePlans(parsed2_table, expected2_table)
   }
 
   test("alter table: archive partition (not supported)") {
@@ -441,29 +423,6 @@ class DDLCommandSuite extends PlanTest {
     assertUnsupported("ALTER TABLE table_name UNARCHIVE PARTITION 
(dt='2008-08-08', country='us')")
   }
 
-  /*
-  test("alter table: set file format") {
-    val sql1 = "ALTER TABLE table_name SET FILEFORMAT INPUTFORMAT 'test' " +
-      "OUTPUTFORMAT 'test' SERDE 'test'"
-    val sql2 = "ALTER TABLE table_name PARTITION (dt='2008-08-08', 
country='us') " +
-      "SET FILEFORMAT PARQUET"
-    val parsed1 = parser.parsePlan(sql1)
-    val parsed2 = parser.parsePlan(sql2)
-    val tableIdent = TableIdentifier("table_name", None)
-    val expected1 = AlterTableSetFileFormat(
-      tableIdent,
-      None,
-      List("test", "test", "test"),
-      None)(sql1)
-    val expected2 = AlterTableSetFileFormat(
-      tableIdent,
-      Some(Map("dt" -> "2008-08-08", "country" -> "us")),
-      Seq(),
-      Some("PARQUET"))(sql2)
-    comparePlans(parsed1, expected1)
-    comparePlans(parsed2, expected2)
-  } */
-
   test("alter table: set file format (not allowed)") {
     assertUnsupported(
       "ALTER TABLE table_name SET FILEFORMAT INPUTFORMAT 'test' " +
@@ -527,58 +486,6 @@ class DDLCommandSuite extends PlanTest {
     assertUnsupported("ALTER TABLE table_name SKEWED BY (key) ON (1,5,6) 
STORED AS DIRECTORIES")
   }
 
-  /*
-  test("alter table: change column name/type/position/comment") {
-    val sql1 = "ALTER TABLE table_name CHANGE col_old_name col_new_name INT"
-    val sql2 =
-      """
-       |ALTER TABLE table_name CHANGE COLUMN col_old_name col_new_name INT
-       |COMMENT 'col_comment' FIRST CASCADE
-      """.stripMargin
-    val sql3 =
-      """
-       |ALTER TABLE table_name CHANGE COLUMN col_old_name col_new_name INT
-       |COMMENT 'col_comment' AFTER column_name RESTRICT
-      """.stripMargin
-    val parsed1 = parser.parsePlan(sql1)
-    val parsed2 = parser.parsePlan(sql2)
-    val parsed3 = parser.parsePlan(sql3)
-    val tableIdent = TableIdentifier("table_name", None)
-    val expected1 = AlterTableChangeCol(
-      tableName = tableIdent,
-      partitionSpec = None,
-      oldColName = "col_old_name",
-      newColName = "col_new_name",
-      dataType = IntegerType,
-      comment = None,
-      afterColName = None,
-      restrict = false,
-      cascade = false)(sql1)
-    val expected2 = AlterTableChangeCol(
-      tableName = tableIdent,
-      partitionSpec = None,
-      oldColName = "col_old_name",
-      newColName = "col_new_name",
-      dataType = IntegerType,
-      comment = Some("col_comment"),
-      afterColName = None,
-      restrict = false,
-      cascade = true)(sql2)
-    val expected3 = AlterTableChangeCol(
-      tableName = tableIdent,
-      partitionSpec = None,
-      oldColName = "col_old_name",
-      newColName = "col_new_name",
-      dataType = IntegerType,
-      comment = Some("col_comment"),
-      afterColName = Some("column_name"),
-      restrict = true,
-      cascade = false)(sql3)
-    comparePlans(parsed1, expected1)
-    comparePlans(parsed2, expected2)
-    comparePlans(parsed3, expected3)
-  } */
-
   test("alter table: change column name/type/position/comment (not allowed)") {
     assertUnsupported("ALTER TABLE table_name CHANGE col_old_name col_new_name 
INT")
     assertUnsupported(
@@ -592,44 +499,6 @@ class DDLCommandSuite extends PlanTest {
       """.stripMargin)
   }
 
-  /*
-  test("alter table: add/replace columns") {
-    val sql1 =
-      """
-       |ALTER TABLE table_name PARTITION (dt='2008-08-08', country='us')
-       |ADD COLUMNS (new_col1 INT COMMENT 'test_comment', new_col2 LONG
-       |COMMENT 'test_comment2') CASCADE
-      """.stripMargin
-    val sql2 =
-      """
-       |ALTER TABLE table_name REPLACE COLUMNS (new_col1 INT
-       |COMMENT 'test_comment', new_col2 LONG COMMENT 'test_comment2') RESTRICT
-      """.stripMargin
-    val parsed1 = parser.parsePlan(sql1)
-    val parsed2 = parser.parsePlan(sql2)
-    val meta1 = new MetadataBuilder().putString("comment", 
"test_comment").build()
-    val meta2 = new MetadataBuilder().putString("comment", 
"test_comment2").build()
-    val tableIdent = TableIdentifier("table_name", None)
-    val expected1 = AlterTableAddCol(
-      tableIdent,
-      Some(Map("dt" -> "2008-08-08", "country" -> "us")),
-      StructType(Seq(
-        StructField("new_col1", IntegerType, nullable = true, meta1),
-        StructField("new_col2", LongType, nullable = true, meta2))),
-      restrict = false,
-      cascade = true)(sql1)
-    val expected2 = AlterTableReplaceCol(
-      tableIdent,
-      None,
-      StructType(Seq(
-        StructField("new_col1", IntegerType, nullable = true, meta1),
-        StructField("new_col2", LongType, nullable = true, meta2))),
-      restrict = true,
-      cascade = false)(sql2)
-    comparePlans(parsed1, expected1)
-    comparePlans(parsed2, expected2)
-  } */
-
   test("alter table: add/replace columns (not allowed)") {
     assertUnsupported(
       """
@@ -678,6 +547,7 @@ class DDLCommandSuite extends PlanTest {
     val parsed2 = parser.parsePlan(s"DROP TABLE IF EXISTS $tableName1")
     val parsed3 = parser.parsePlan(s"DROP TABLE $tableName2")
     val parsed4 = parser.parsePlan(s"DROP TABLE IF EXISTS $tableName2")
+    assertUnsupported(s"DROP TABLE IF EXISTS $tableName2 PURGE")
 
     val expected1 =
       DropTable(TableIdentifier("tab", Option("db")), ifExists = false, isView 
= false)
@@ -722,20 +592,20 @@ class DDLCommandSuite extends PlanTest {
     val sql1 = "SHOW COLUMNS FROM t1"
     val sql2 = "SHOW COLUMNS IN db1.t1"
     val sql3 = "SHOW COLUMNS FROM t1 IN db1"
-    val sql4 = "SHOW COLUMNS FROM db1.t1 IN db2"
+    val sql4 = "SHOW COLUMNS FROM db1.t1 IN db1"
+    val sql5 = "SHOW COLUMNS FROM db1.t1 IN db2"
 
     val parsed1 = parser.parsePlan(sql1)
     val expected1 = ShowColumnsCommand(TableIdentifier("t1", None))
     val parsed2 = parser.parsePlan(sql2)
     val expected2 = ShowColumnsCommand(TableIdentifier("t1", Some("db1")))
     val parsed3 = parser.parsePlan(sql3)
+    val parsed4 = parser.parsePlan(sql4)
     comparePlans(parsed1, expected1)
     comparePlans(parsed2, expected2)
     comparePlans(parsed3, expected2)
-    val message = intercept[ParseException] {
-      parser.parsePlan(sql4)
-    }.getMessage
-    assert(message.contains("Duplicates the declaration for database"))
+    comparePlans(parsed4, expected2)
+    assertUnsupported(sql5)
   }
 
   test("show partitions") {

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
----------------------------------------------------------------------
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
index cb88a1c..c1dc9b9 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/sources/CreateTableAsSelectSuite.scala
@@ -22,6 +22,7 @@ import java.io.{File, IOException}
 import org.scalatest.BeforeAndAfter
 
 import org.apache.spark.sql.AnalysisException
+import org.apache.spark.sql.catalyst.parser.ParseException
 import org.apache.spark.sql.test.SharedSQLContext
 import org.apache.spark.util.Utils
 
@@ -104,7 +105,7 @@ class CreateTableAsSelectSuite extends DataSourceTest with 
SharedSQLContext with
       sql("SELECT a, b FROM jsonTable"),
       sql("SELECT a, b FROM jt").collect())
 
-    val message = intercept[AnalysisException]{
+    val message = intercept[ParseException]{
       sql(
         s"""
         |CREATE TEMPORARY TABLE IF NOT EXISTS jsonTable
@@ -115,9 +116,7 @@ class CreateTableAsSelectSuite extends DataSourceTest with 
SharedSQLContext with
         |SELECT a * 4 FROM jt
       """.stripMargin)
     }.getMessage
-    assert(
-      message.contains(s"a CREATE TEMPORARY TABLE statement does not allow IF 
NOT EXISTS clause."),
-      "CREATE TEMPORARY TABLE IF NOT EXISTS should not be allowed.")
+    assert(message.toLowerCase.contains("operation not allowed"))
 
     // Overwrite the temporary table.
     sql(
@@ -155,7 +154,7 @@ class CreateTableAsSelectSuite extends DataSourceTest with 
SharedSQLContext with
   }
 
   test("CREATE TEMPORARY TABLE AS SELECT with IF NOT EXISTS is not allowed") {
-    val message = intercept[AnalysisException]{
+    val message = intercept[ParseException]{
       sql(
         s"""
         |CREATE TEMPORARY TABLE IF NOT EXISTS jsonTable
@@ -166,9 +165,7 @@ class CreateTableAsSelectSuite extends DataSourceTest with 
SharedSQLContext with
         |SELECT b FROM jt
       """.stripMargin)
     }.getMessage
-    assert(
-      message.contains("a CREATE TEMPORARY TABLE statement does not allow IF 
NOT EXISTS clause."),
-      "CREATE TEMPORARY TABLE IF NOT EXISTS should not be allowed.")
+    assert(message.toLowerCase.contains("operation not allowed"))
   }
 
   test("a CTAS statement with column definitions is not allowed") {

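Both rewritten tests above follow the same pattern: trap the parser-level ParseException instead of a later AnalysisException and check only for the generic "operation not allowed" wording. A self-contained sketch of that pattern, assuming a ScalaTest context and a `runSql` stand-in for the suite's `sql(...)` helper (the helper itself is not part of the patch):

    import org.apache.spark.sql.catalyst.parser.ParseException
    import org.scalatest.Assertions.{assert, intercept}

    // Illustrative helper: asserts that a statement is rejected at parse time.
    object ParseErrorChecks {
      def assertOperationNotAllowed(runSql: String => Unit, sqlText: String): Unit = {
        val message = intercept[ParseException] { runSql(sqlText) }.getMessage
        assert(message.toLowerCase.contains("operation not allowed"))
      }
    }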
http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
 
b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
index f082035..1eebeca 100644
--- 
a/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
+++ 
b/sql/hive/compatibility/src/test/scala/org/apache/spark/sql/hive/execution/HiveCompatibilitySuite.scala
@@ -503,7 +503,12 @@ class HiveCompatibilitySuite extends HiveQueryFileTest 
with BeforeAndAfter {
     // We have converted the useful parts of these tests to tests
     // in org.apache.spark.sql.hive.execution.SQLQuerySuite.
     "drop_database_removes_partition_dirs",
-    "drop_table_removes_partition_dirs"
+    "drop_table_removes_partition_dirs",
+
+    // These tests use EXPLAIN FORMATTED, which is not supported
+    "input4",
+    "join0",
+    "plan_json"
   )
 
   /**
@@ -699,7 +704,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with 
BeforeAndAfter {
     "input26",
     "input28",
     "input2_limit",
-    "input4",
     "input40",
     "input41",
     "input49",
@@ -728,7 +732,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with 
BeforeAndAfter {
     "insert1_overwrite_partitions",
     "insert2_overwrite_partitions",
     "insert_compressed",
-    "join0",
     "join1",
     "join10",
     "join11",
@@ -866,7 +869,6 @@ class HiveCompatibilitySuite extends HiveQueryFileTest with 
BeforeAndAfter {
     "partition_type_check",
     "partition_varchar1",
     "partition_wise_fileformat9",
-    "plan_json",
     "ppd1",
     "ppd2",
     "ppd_clusterby",

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala 
b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
index ee048b2..5ffd8ef 100644
--- 
a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
+++ 
b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
@@ -75,7 +75,7 @@ private[spark] class HiveExternalCatalog(client: HiveClient) 
extends ExternalCat
   private def requireDbMatches(db: String, table: CatalogTable): Unit = {
     if (table.identifier.database != Some(db)) {
       throw new AnalysisException(
-        s"Provided database $db does not match the one specified in the " +
+        s"Provided database '$db' does not match the one specified in the " +
         s"table definition (${table.identifier.database.getOrElse("n/a")})")
     }
   }

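The only change here is quoting the database name in the message. For reference, a dependency-free sketch of the check, with IllegalArgumentException standing in for the AnalysisException the real method throws:

    // Sketch of requireDbMatches: the declared database must match the one recorded
    // in the table definition; the message mirrors the patched wording above.
    object CatalogChecks {
      def requireDbMatches(db: String, definitionDb: Option[String]): Unit = {
        if (definitionDb != Some(db)) {
          throw new IllegalArgumentException(
            s"Provided database '$db' does not match the one specified in the " +
            s"table definition (${definitionDb.getOrElse("n/a")})")
        }
      }
    }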
http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala 
b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
index cdfadfa..47d9546 100644
--- 
a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
+++ 
b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
@@ -26,9 +26,12 @@ import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
 import org.apache.hadoop.hive.conf.HiveConf
 import org.apache.hadoop.hive.metastore.{PartitionDropOptions, TableType => 
HiveTableType}
-import org.apache.hadoop.hive.metastore.api.{Database => HiveDatabase, 
FieldSchema, Function => HiveFunction, FunctionType, PrincipalType, 
ResourceType, ResourceUri}
+import org.apache.hadoop.hive.metastore.api.{Database => HiveDatabase, 
FieldSchema}
+import org.apache.hadoop.hive.metastore.api.{Function => HiveFunction, 
FunctionType}
+import org.apache.hadoop.hive.metastore.api.{NoSuchObjectException, 
PrincipalType}
+import org.apache.hadoop.hive.metastore.api.{ResourceType, ResourceUri}
 import org.apache.hadoop.hive.ql.Driver
-import org.apache.hadoop.hive.ql.metadata.{Hive, HiveException, Partition => 
HivePartition, Table => HiveTable}
+import org.apache.hadoop.hive.ql.metadata.{Hive, Partition => HivePartition, 
Table => HiveTable}
 import org.apache.hadoop.hive.ql.plan.AddPartitionDesc
 import org.apache.hadoop.hive.ql.processors._
 import org.apache.hadoop.hive.ql.session.SessionState
@@ -43,7 +46,7 @@ import org.apache.spark.sql.catalyst.catalog._
 import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
 import org.apache.spark.sql.catalyst.expressions.Expression
 import org.apache.spark.sql.execution.QueryExecutionException
-import org.apache.spark.util.{CircularBuffer, Utils}
+import org.apache.spark.util.{CausedBy, CircularBuffer, Utils}
 
 /**
  * A class that wraps the HiveClient and converts its responses to externally 
visible classes.
@@ -616,7 +619,8 @@ private[hive] class HiveClientImpl(
     try {
       Option(client.getFunction(db, name)).map(fromHiveFunction)
     } catch {
-      case he: HiveException => None
+      case CausedBy(ex: NoSuchObjectException) if ex.getMessage.contains(name) 
=>
+        None
     }
   }
 

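The new catch clause narrows the handling from any HiveException to a NoSuchObjectException found anywhere in the cause chain, using the CausedBy extractor. A sketch of such a root-cause extractor, in the spirit of org.apache.spark.util.CausedBy (illustrative, not the actual class):

    // Walks getCause to the end of the chain so callers can pattern-match on the
    // underlying exception rather than on whatever wrapper Hive throws.
    object RootCause {
      def unapply(e: Throwable): Option[Throwable] =
        Option(e.getCause).flatMap(unapply).orElse(Some(e))
    }
    // Matching then reads like the new getFunctionOption catch block:
    //   case RootCause(ex: NoSuchObjectException) if ex.getMessage.contains(name) => None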
http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala
index 8b3f2d1..b8fef23 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveCommandSuite.scala
@@ -125,7 +125,7 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils 
with TestHiveSingleto
     val message1 = intercept[AnalysisException] {
       sql("SHOW TBLPROPERTIES badtable")
     }.getMessage
-    assert(message1.contains("Table or View badtable not found in database 
default"))
+    assert(message1.contains("'badtable' not found in database 'default'"))
 
     // When key is not found, a row containing the error is returned.
     checkAnswer(
@@ -289,7 +289,7 @@ class HiveCommandSuite extends QueryTest with SQLTestUtils 
with TestHiveSingleto
     val message = intercept[NoSuchTableException] {
       sql("SHOW COLUMNS IN badtable FROM default")
     }.getMessage
-    assert(message.contains("badtable not found in database"))
+    assert(message.contains("'badtable' not found in database"))
   }
 
   test("show partitions - show everything") {

http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
index f20ab36..f7da9e7 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLQuerySuite.scala
@@ -1502,7 +1502,7 @@ class SQLQuerySuite extends QueryTest with SQLTestUtils 
with TestHiveSingleton {
         )
       }
 
-      assert(cause.getMessage.contains("Only ASC ordering is supported for 
sorting columns"))
+      assert(cause.getMessage.contains("Column ordering must be ASC, was 
'DESC'"))
     }
   }
 }

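The reworded assertion reflects the check that SORTED BY columns may only use ascending order. Conceptually the guard is no more than the following (names are illustrative, not the actual parser code):

    // Hypothetical sketch of the ordering validation behind the new message.
    object SortColumnChecks {
      def requireAscOrdering(ordering: String): Unit = {
        if (!ordering.equalsIgnoreCase("ASC")) {
          throw new UnsupportedOperationException(s"Column ordering must be ASC, was '$ordering'")
        }
      }
    }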
http://git-wip-us.apache.org/repos/asf/spark/blob/6ba17cd1/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLViewSuite.scala
----------------------------------------------------------------------
diff --git 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLViewSuite.scala
 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLViewSuite.scala
index 0d88b3b..5184847 100644
--- 
a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLViewSuite.scala
+++ 
b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/SQLViewSuite.scala
@@ -105,7 +105,8 @@ class SQLViewSuite extends QueryTest with SQLTestUtils with 
TestHiveSingleton {
           val e = intercept[AnalysisException] {
             sql("CREATE OR REPLACE VIEW IF NOT EXISTS testView AS SELECT id 
FROM jt")
           }
-          assert(e.message.contains("not allowed to define a view"))
+          assert(e.message.contains(
+            "CREATE VIEW with both IF NOT EXISTS and REPLACE is not allowed"))
         }
       }
     }

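The assertion now pins down the exact wording of the guard that rejects OR REPLACE combined with IF NOT EXISTS. A minimal sketch of that mutual-exclusion check (illustrative names; the real check sits in the view creation path):

    // Sketch only: CREATE VIEW may carry OR REPLACE or IF NOT EXISTS, but not both.
    object CreateViewChecks {
      def validateFlags(replace: Boolean, ifNotExists: Boolean): Unit = {
        if (replace && ifNotExists) {
          throw new IllegalArgumentException(
            "CREATE VIEW with both IF NOT EXISTS and REPLACE is not allowed.")
        }
      }
    }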

---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@spark.apache.org
For additional commands, e-mail: commits-h...@spark.apache.org
