This is an automated email from the ASF dual-hosted git repository.

yao pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/spark.git


The following commit(s) were added to refs/heads/master by this push:
     new 2af1cfeedaa5 [SPARK-55091][SQL] Reduce Hive RPC calls for DROP TABLE command
2af1cfeedaa5 is described below

commit 2af1cfeedaa5d97ff494a88c5df57a69e6c7ceea
Author: Wenchen Fan <[email protected]>
AuthorDate: Tue Jan 20 15:09:22 2026 +0800

    [SPARK-55091][SQL] Reduce Hive RPC calls for DROP TABLE command
    
    ### What changes were proposed in this pull request?
    
    This PR optimizes the DROP TABLE command to reduce the number of Hive RPC calls by:
    
    1. **Remove redundant checks in `SessionCatalog.getTableRawMetadata`** - removed `requireDbExists` and `requireTableExists` calls, letting `ExternalCatalog.getTable` handle error reporting.
    
    2. **Remove `requireDbExists` from `HiveExternalCatalog.dropTable`** - the database existence check is now handled at the lowest layer.
    
    3. **Update `HiveClientImpl.dropTable`** to handle missing database/table cases properly with optimistic exception handling (see the sketch after this list).
    
    4. **Update `InMemoryCatalog.dropTable`** to handle the missing-database case consistently.
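
    Change 3 is the core of the optimization: instead of checking database and
    table existence up front (extra RPCs on every call), the client attempts the
    drop and consults existence only when Hive reports a failure. A condensed
    sketch of the new `HiveClientImpl.dropTable` (restructured slightly for
    brevity; the authoritative version is in the diff below):

        override def dropTable(
            dbName: String,
            tableName: String,
            ignoreIfNotExists: Boolean,
            purge: Boolean): Unit = withHiveState {
          try {
            // Happy path: a single RPC when the table exists.
            shim.dropTable(client, dbName, tableName, true, ignoreIfNotExists, purge)
          } catch {
            case NonFatal(e) =>
              // Pay for the existence checks only on failure.
              if (!databaseExists(dbName) || !tableExists(dbName, tableName)) {
                // Missing database/table: swallow the error when
                // ignoreIfNotExists (DROP TABLE IF EXISTS) is set.
                if (!ignoreIfNotExists) {
                  throw new NoSuchTableException(
                    Seq(CatalogManager.SESSION_CATALOG_NAME, dbName, tableName))
                }
              } else {
                // Both exist, so the failure has another cause; re-throw it.
                throw e
              }
          }
        }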
    
    ### Why are the changes needed?
    
    Before this change, DROP TABLE on an existing table made 10 Hive client calls. After this change, it only makes 3 calls:
    1. `tableExists` (in DropTableExec to check if table exists)
    2. `getTable` (to get table metadata)
    3. `dropTable` (the actual drop operation)
    
    For non-existent tables, it now only makes 1 call (just `tableExists`).
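
    The updated hive `DropTableSuite` (full diff below) pins these counts down
    directly, e.g.:

        // Existing table: tableExists + getTable + dropTable = 3 calls.
        checkHiveClientCalls(expected = 3) {
          sql(s"DROP TABLE $t")
        }
        // Non-existent table with IF EXISTS: tableExists only.
        checkHiveClientCalls(expected = 1) {
          sql(s"DROP TABLE IF EXISTS $catalog.ns.tbl")
        }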
    
    ### Does this PR introduce _any_ user-facing change?
    
    No, the behavior is the same. Only internal RPC calls are reduced.
    
    ### How was this patch tested?
    
    Updated and extended existing tests in:
    - `ExternalCatalogSuite`
    - `SessionCatalogSuite`
    - `DropTableSuiteBase`
    - `DropTableSuite` (hive)
    
    ### Was this patch authored or co-authored using generative AI tooling?
    
    Yes. cursor 2.3.41
    
    Closes #53858 from cloud-fan/func.
    
    Lead-authored-by: Wenchen Fan <[email protected]>
    Co-authored-by: Wenchen Fan <[email protected]>
    Signed-off-by: Kent Yao <[email protected]>
---
 .../sql/catalyst/catalog/ExternalCatalog.scala     | 18 +++++++++--
 .../sql/catalyst/catalog/InMemoryCatalog.scala     | 12 +++++--
 .../sql/catalyst/catalog/SessionCatalog.scala      | 13 ++------
 .../catalyst/catalog/ExternalCatalogSuite.scala    |  7 ++--
 .../sql/catalyst/catalog/SessionCatalogSuite.scala | 11 +++----
 .../datasources/v2/V2SessionCatalog.scala          |  4 +++
 .../sql/execution/command/DropTableSuiteBase.scala | 14 ++++++++
 .../spark/sql/hive/HiveExternalCatalog.scala       |  1 -
 .../apache/spark/sql/hive/client/HiveClient.scala  | 26 ++++++++++++---
 .../spark/sql/hive/client/HiveClientImpl.scala     | 20 +++++++++++-
 .../command/AlterTableAddPartitionSuite.scala      |  4 +--
 .../command/AlterTableDropPartitionSuite.scala     | 12 +++----
 .../command/AlterTableRenamePartitionSuite.scala   |  6 ++--
 .../execution/command/DescribeTableSuite.scala     |  2 +-
 .../hive/execution/command/DropTableSuite.scala    | 37 +++++++++++++++++++++-
 .../execution/command/ShowPartitionsSuite.scala    |  2 +-
 16 files changed, 143 insertions(+), 46 deletions(-)

diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala
index d1f37020f211..ac737f171151 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalog.scala
@@ -17,8 +17,9 @@
 
 package org.apache.spark.sql.catalyst.catalog
 
-import org.apache.spark.sql.catalyst.analysis.{FunctionAlreadyExistsException, NoSuchDatabaseException, NoSuchFunctionException, NoSuchTableException}
+import org.apache.spark.sql.catalyst.analysis.{FunctionAlreadyExistsException, NoSuchFunctionException, NoSuchNamespaceException, NoSuchTableException}
 import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.connector.catalog.CatalogManager
 import org.apache.spark.sql.types.StructType
 
 /**
@@ -39,13 +40,13 @@ trait ExternalCatalog {
 
   protected def requireDbExists(db: String): Unit = {
     if (!databaseExists(db)) {
-      throw new NoSuchDatabaseException(db)
+      throw new NoSuchNamespaceException(Seq(CatalogManager.SESSION_CATALOG_NAME, db))
     }
   }
 
   protected def requireTableExists(db: String, table: String): Unit = {
     if (!tableExists(db, table)) {
-      throw new NoSuchTableException(db = db, table = table)
+      throw new NoSuchTableException(Seq(CatalogManager.SESSION_CATALOG_NAME, db, table))
     }
   }
 
@@ -94,6 +95,17 @@ trait ExternalCatalog {
 
   def createTable(tableDefinition: CatalogTable, ignoreIfExists: Boolean): Unit
 
+  /**
+   * Drop a table from the specified database.
+   *
+   * @param db database name
+   * @param table table name
+   * @param ignoreIfNotExists if true, do not throw an error if the table or database
+   *                          does not exist
+   * @param purge if true, completely remove the table data (skip trash)
+   * @throws NoSuchTableException if the table or database does not exist and
+   *                              ignoreIfNotExists is false
+   */
   def dropTable(
       db: String,
       table: String,
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
index f5c732ee1412..cd4a5645151b 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/InMemoryCatalog.scala
@@ -29,6 +29,7 @@ import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.catalog.ExternalCatalogUtils._
 import org.apache.spark.sql.catalyst.expressions.Expression
 import org.apache.spark.sql.catalyst.util.StringUtils
+import org.apache.spark.sql.connector.catalog.CatalogManager
 import org.apache.spark.sql.connector.catalog.SupportsNamespaces.PROP_OWNER
 import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors}
 import org.apache.spark.sql.types.StructType
@@ -238,7 +239,13 @@ class InMemoryCatalog(
       table: String,
       ignoreIfNotExists: Boolean,
       purge: Boolean): Unit = synchronized {
-    requireDbExists(db)
+    if (!databaseExists(db)) {
+      if (ignoreIfNotExists) {
+        return
+      } else {
+        throw new NoSuchTableException(Seq(CatalogManager.SESSION_CATALOG_NAME, db, table))
+      }
+    }
     if (tableExists(db, table)) {
       val tableMeta = getTable(db, table)
       if (tableMeta.tableType == CatalogTableType.MANAGED) {
@@ -271,7 +278,7 @@ class InMemoryCatalog(
       catalog(db).tables.remove(table)
     } else {
       if (!ignoreIfNotExists) {
-        throw new NoSuchTableException(db = db, table = table)
+        throw new NoSuchTableException(Seq(CatalogManager.SESSION_CATALOG_NAME, db, table))
       }
     }
   }
@@ -362,6 +369,7 @@ class InMemoryCatalog(
   }
 
   override def getTable(db: String, table: String): CatalogTable = synchronized {
+    requireDbExists(db)
     requireTableExists(db, table)
     catalog(db).tables(table).table
   }
diff --git a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
index c0615ab2d2e6..130ccc1bc6e1 100644
--- a/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
+++ b/sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalog.scala
@@ -586,8 +586,7 @@ class SessionCatalog(
     val qualifiedIdent = qualifyIdentifier(name)
     val db = qualifiedIdent.database.get
     val table = qualifiedIdent.table
-    requireDbExists(db)
-    requireTableExists(qualifiedIdent)
+    // Let the external catalog handle all error cases (db not exists, table not exists)
     attachCatalogName(externalCatalog.getTable(db, table))
   }
 
@@ -893,14 +892,8 @@ class SessionCatalog(
       }
     } else {
       if (name.database.isDefined || !tempViews.contains(table)) {
-        requireDbExists(db)
-        // When ignoreIfNotExists is false, no exception is issued when the table does not exist.
-        // Instead, log it as an error message.
-        if (tableExists(qualifiedIdent)) {
-          externalCatalog.dropTable(db, table, ignoreIfNotExists = true, purge = purge)
-        } else if (!ignoreIfNotExists) {
-          throw new NoSuchTableException(db = db, table = table)
-        }
+        // Let the external catalog handle all error cases (db not exists, table not exists)
+        externalCatalog.dropTable(db, table, ignoreIfNotExists, purge)
       } else {
         tempViews.remove(table)
       }
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala
index aecd5ef54d37..f06128f23679 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/ExternalCatalogSuite.scala
@@ -185,13 +185,12 @@ abstract class ExternalCatalogSuite extends SparkFunSuite {
 
   test("drop table when database/table does not exist") {
     val catalog = newBasicCatalog()
-    // Should always throw exception when the database does not exist
+    // Should throw exception when the database does not exist and ignoreIfNotExists is false
     intercept[AnalysisException] {
       catalog.dropTable("unknown_db", "unknown_table", ignoreIfNotExists = 
false, purge = false)
     }
-    intercept[AnalysisException] {
-      catalog.dropTable("unknown_db", "unknown_table", ignoreIfNotExists = 
true, purge = false)
-    }
+    // Should succeed (no-op) when the database does not exist and ignoreIfNotExists is true
+    catalog.dropTable("unknown_db", "unknown_table", ignoreIfNotExists = true, purge = false)
     // Should throw exception when the table does not exist, if ignoreIfNotExists is false
     intercept[AnalysisException] {
       catalog.dropTable("db2", "unknown_table", ignoreIfNotExists = false, 
purge = false)
diff --git a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
index 2841e5adb2ad..47e0321bdfef 100644
--- a/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
+++ b/sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/catalog/SessionCatalogSuite.scala
@@ -409,15 +409,14 @@ abstract class SessionCatalogSuite extends AnalysisTest with Eventually {
 
   test("drop table when database/table does not exist") {
     withBasicCatalog { catalog =>
-      // Should always throw exception when the database does not exist
-      intercept[NoSuchNamespaceException] {
+      // Should throw exception when the database does not exist and ignoreIfNotExists is false
+      intercept[NoSuchTableException] {
         catalog.dropTable(TableIdentifier("tbl1", Some("unknown_db")), 
ignoreIfNotExists = false,
           purge = false)
       }
-      intercept[NoSuchNamespaceException] {
-        catalog.dropTable(TableIdentifier("tbl1", Some("unknown_db")), 
ignoreIfNotExists = true,
-          purge = false)
-      }
+      // Should succeed (no-op) when the database does not exist and ignoreIfNotExists is true
+      catalog.dropTable(TableIdentifier("tbl1", Some("unknown_db")), ignoreIfNotExists = true,
+        purge = false)
       intercept[NoSuchTableException] {
         catalog.dropTable(TableIdentifier("unknown_table", Some("db2")), 
ignoreIfNotExists = false,
           purge = false)
diff --git a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalog.scala b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalog.scala
index 60ab6bbd722e..7cf040b1ec97 100644
--- a/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalog.scala
+++ b/sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/v2/V2SessionCatalog.scala
@@ -137,6 +137,10 @@ class V2SessionCatalog(catalog: SessionCatalog)
     failTimeTravel(ident, loadTable(ident))
   }
 
+  override def tableExists(ident: Identifier): Boolean = {
+    catalog.tableExists(ident.asTableIdentifier)
+  }
+
   private def failTimeTravel(ident: Identifier, t: Table): Table = {
     val nameParts = t match {
       case V1Table(catalogTable) => catalogTable.identifier.nameParts
diff --git a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DropTableSuiteBase.scala b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DropTableSuiteBase.scala
index 3df9843f8a43..5402546dfc15 100644
--- a/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DropTableSuiteBase.scala
+++ b/sql/core/src/test/scala/org/apache/spark/sql/execution/command/DropTableSuiteBase.scala
@@ -76,6 +76,20 @@ trait DropTableSuiteBase extends QueryTest with DDLCommandTestUtils {
     }
   }
 
+  test("IF EXISTS with non-existent database") {
+    // DROP TABLE IF EXISTS should not throw when the database doesn't exist
+    sql(s"DROP TABLE IF EXISTS $catalog.non_existent_db.tbl")
+
+    // DROP TABLE without IF EXISTS should throw when the database doesn't exist
+    checkError(
+      intercept[AnalysisException] {
+        sql(s"DROP TABLE $catalog.non_existent_db.tbl")
+      },
+      condition = "TABLE_OR_VIEW_NOT_FOUND",
+      parameters = Map("relationName" -> s"`$catalog`.`non_existent_db`.`tbl`")
+    )
+  }
+
   test("SPARK-33174: DROP TABLE should resolve to a temporary view first") {
     withNamespaceAndTable("ns", "t") { t =>
       withTempView("t") {
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
index 21bdd28dfd88..8ec4f97c43e8 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/HiveExternalCatalog.scala
@@ -535,7 +535,6 @@ private[spark] class HiveExternalCatalog(conf: SparkConf, hadoopConf: Configurat
       table: String,
       ignoreIfNotExists: Boolean,
       purge: Boolean): Unit = withClient {
-    requireDbExists(db)
     client.dropTable(db, table, ignoreIfNotExists, purge)
   }
 
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala
index 402d3c4ae7d9..ad7648096354 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClient.scala
@@ -23,6 +23,7 @@ import org.apache.spark.sql.catalyst.analysis._
 import org.apache.spark.sql.catalyst.catalog._
 import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
 import org.apache.spark.sql.catalyst.expressions.Expression
+import org.apache.spark.sql.connector.catalog.CatalogManager
 import org.apache.spark.sql.types.StructType
 
 
@@ -93,18 +94,33 @@ private[hive] trait HiveClient {
   /** Return whether a table/view with the specified name exists. */
   def tableExists(dbName: String, tableName: String): Boolean
 
-  /** Returns the specified table, or throws `NoSuchTableException`. */
+  /**
+   * Returns the specified table, or throws [[NoSuchNamespaceException]] if the database
+   * does not exist, or [[NoSuchTableException]] if the table does not exist.
+   */
   final def getTable(dbName: String, tableName: String): CatalogTable = {
-    getTableOption(dbName, tableName).getOrElse(throw new NoSuchTableException(dbName, tableName))
+    getTableOption(dbName, tableName).getOrElse {
+      if (!databaseExists(dbName)) {
+        throw new NoSuchNamespaceException(Seq(CatalogManager.SESSION_CATALOG_NAME, dbName))
+      }
+      throw new NoSuchTableException(Seq(CatalogManager.SESSION_CATALOG_NAME, dbName, tableName))
+    }
   }
 
   /** Returns the metadata for the specified table or None if it doesn't exist. */
   def getTableOption(dbName: String, tableName: String): Option[CatalogTable]
 
-  /** Returns the specified catalog and Hive table, or throws `NoSuchTableException`. */
+  /**
+   * Returns the specified catalog and Hive table, or throws [[NoSuchNamespaceException]] if
+   * the database does not exist, or [[NoSuchTableException]] if the table does not exist.
+   */
   final def getRawHiveTable(dbName: String, tableName: String): RawHiveTable = {
-    getRawHiveTableOption(dbName, tableName)
-      .getOrElse(throw new NoSuchTableException(dbName, tableName))
+    getRawHiveTableOption(dbName, tableName).getOrElse {
+      if (!databaseExists(dbName)) {
+        throw new NoSuchNamespaceException(Seq(CatalogManager.SESSION_CATALOG_NAME, dbName))
+      }
+      throw new NoSuchTableException(Seq(CatalogManager.SESSION_CATALOG_NAME, dbName, tableName))
+    }
   }
 
   /** Returns the metadata for the specified catalog and Hive table or None if it doesn't exist. */
diff --git a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
index b13de8b7c6f7..4404a13c1266 100644
--- a/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
+++ b/sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveClientImpl.scala
@@ -28,6 +28,7 @@ import scala.annotation.tailrec
 import scala.collection.mutable
 import scala.collection.mutable.ArrayBuffer
 import scala.jdk.CollectionConverters._
+import scala.util.control.NonFatal
 
 import org.apache.hadoop.conf.Configuration
 import org.apache.hadoop.fs.Path
@@ -60,6 +61,7 @@ import org.apache.spark.sql.catalyst.catalog.CatalogTypes.TablePartitionSpec
 import org.apache.spark.sql.catalyst.expressions.Expression
 import org.apache.spark.sql.catalyst.parser.{CatalystSqlParser, ParseException}
 import org.apache.spark.sql.catalyst.util.CharVarcharUtils
+import org.apache.spark.sql.connector.catalog.CatalogManager
 import org.apache.spark.sql.connector.catalog.SupportsNamespaces._
 import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors}
 import org.apache.spark.sql.execution.QueryExecutionException
@@ -586,7 +588,23 @@ private[hive] class HiveClientImpl(
       tableName: String,
       ignoreIfNotExists: Boolean,
       purge: Boolean): Unit = withHiveState {
-    shim.dropTable(client, dbName, tableName, true, ignoreIfNotExists, purge)
+    try {
+      shim.dropTable(client, dbName, tableName, true, ignoreIfNotExists, purge)
+    } catch {
+      case NonFatal(e) =>
+        // Check if the error is due to missing database or table.
+        if (!databaseExists(dbName) || !tableExists(dbName, tableName)) {
+          if (ignoreIfNotExists) {
+            // Database or table doesn't exist and we're ignoring - treat as success
+            return
+          } else {
+            throw new NoSuchTableException(
+              Seq(CatalogManager.SESSION_CATALOG_NAME, dbName, tableName))
+          }
+        }
+        // Both database and table exist, so re-throw the original exception
+        throw e
+    }
   }
 
   override def alterTable(
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableAddPartitionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableAddPartitionSuite.scala
index 24f62abe6609..f1540024a3e2 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableAddPartitionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableAddPartitionSuite.scala
@@ -36,11 +36,11 @@ class AlterTableAddPartitionSuite
           sql(s"INSERT INTO $t PARTITION (part=0) SELECT 0")
           assert(!statsOn || getTableSize(t) > 0)
 
-          checkHiveClientCalls(expected = 17) {
+          checkHiveClientCalls(expected = 9) {
             sql(s"ALTER TABLE $t ADD PARTITION (part=1)")
           }
           sql(s"CACHE TABLE $t")
-          checkHiveClientCalls(expected = 17) {
+          checkHiveClientCalls(expected = 9) {
             sql(s"ALTER TABLE $t ADD PARTITION (part=2)")
           }
         }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableDropPartitionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableDropPartitionSuite.scala
index 050332603513..f83d163dd4cf 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableDropPartitionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableDropPartitionSuite.scala
@@ -36,14 +36,14 @@ class AlterTableDropPartitionSuite
           sql(s"INSERT INTO $t PARTITION (part=0) SELECT 0")
           sql(s"INSERT INTO $t PARTITION (part=1) SELECT 1")
           sql(s"ALTER TABLE $t ADD PARTITION (part=2)") // empty partition
-          checkHiveClientCalls(expected = if (statsOn) 26 else 18) {
+          checkHiveClientCalls(expected = if (statsOn) 16 else 10) {
             sql(s"ALTER TABLE $t DROP PARTITION (part=2)")
           }
-          checkHiveClientCalls(expected = if (statsOn) 31 else 18) {
+          checkHiveClientCalls(expected = if (statsOn) 21 else 10) {
             sql(s"ALTER TABLE $t DROP PARTITION (part=0)")
           }
           sql(s"CACHE TABLE $t")
-          checkHiveClientCalls(expected = if (statsOn) 31 else 18) {
+          checkHiveClientCalls(expected = if (statsOn) 21 else 10) {
             sql(s"ALTER TABLE $t DROP PARTITION (part=1)")
           }
         }
@@ -61,14 +61,14 @@ class AlterTableDropPartitionSuite
           sql(s"INSERT INTO $t PARTITION (part=0) SELECT 0")
           sql(s"INSERT INTO $t PARTITION (part=1) SELECT 1")
           sql(s"ALTER TABLE $t ADD PARTITION (part=2)") // empty partition
-          checkHiveClientCalls(expected = if (statsOn) 25 else 17) {
+          checkHiveClientCalls(expected = if (statsOn) 15 else 9) {
             sql(s"ALTER TABLE $t DROP PARTITION (part=2)")
           }
-          checkHiveClientCalls(expected = if (statsOn) 30 else 17) {
+          checkHiveClientCalls(expected = if (statsOn) 20 else 9) {
             sql(s"ALTER TABLE $t DROP PARTITION (part=0)")
           }
           sql(s"CACHE TABLE $t")
-          checkHiveClientCalls(expected = if (statsOn) 30 else 17) {
+          checkHiveClientCalls(expected = if (statsOn) 20 else 9) {
             sql(s"ALTER TABLE $t DROP PARTITION (part=1)")
           }
         }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableRenamePartitionSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableRenamePartitionSuite.scala
index e3e7ad7e4545..964696eda3b6 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableRenamePartitionSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/AlterTableRenamePartitionSuite.scala
@@ -32,11 +32,11 @@ class AlterTableRenamePartitionSuite
       sql(s"CREATE TABLE $t (id int, part int) $defaultUsing PARTITIONED BY 
(part)")
       sql(s"INSERT INTO $t PARTITION (part=0) SELECT 0")
 
-      checkHiveClientCalls(expected = 19) {
+      checkHiveClientCalls(expected = 11) {
         sql(s"ALTER TABLE $t PARTITION (part=0) RENAME TO PARTITION (part=1)")
       }
       sql(s"CACHE TABLE $t")
-      checkHiveClientCalls(expected = 19) {
+      checkHiveClientCalls(expected = 11) {
         sql(s"ALTER TABLE $t PARTITION (part=1) RENAME TO PARTITION (part=2)")
       }
     }
@@ -44,7 +44,7 @@ class AlterTableRenamePartitionSuite
     withNamespaceAndTable("ns", "tbl") { t =>
       sql(s"CREATE TABLE $t (id int, PART int) $defaultUsing PARTITIONED BY 
(PART)")
       sql(s"INSERT INTO $t PARTITION (PART=0) SELECT 0")
-      checkHiveClientCalls(expected = 24) {
+      checkHiveClientCalls(expected = 16) {
         sql(s"ALTER TABLE $t PARTITION (PART=0) RENAME TO PARTITION (PART=1)")
       }
     }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DescribeTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DescribeTableSuite.scala
index c12d236f4b68..3ee2295fea80 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DescribeTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DescribeTableSuite.scala
@@ -34,7 +34,7 @@ class DescribeTableSuite extends v1.DescribeTableSuiteBase with CommandSuiteBase
   test("Table Ownership") {
     withNamespaceAndTable("ns", "tbl") { t =>
       sql(s"CREATE TABLE $t (c int) $defaultUsing")
-      checkHiveClientCalls(expected = 6) {
+      checkHiveClientCalls(expected = 2) {
         checkAnswer(
           sql(s"DESCRIBE TABLE EXTENDED $t")
             .where("col_name='Owner'")
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DropTableSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DropTableSuite.scala
index e847e92c4cec..aa083bc54f07 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DropTableSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/DropTableSuite.scala
@@ -17,6 +17,7 @@
 
 package org.apache.spark.sql.hive.execution.command
 
+import org.apache.spark.sql.AnalysisException
 import org.apache.spark.sql.execution.command.v1
 
 /**
@@ -26,9 +27,43 @@ class DropTableSuite extends v1.DropTableSuiteBase with CommandSuiteBase {
   test("hive client calls") {
     withNamespaceAndTable("ns", "tbl") { t =>
       sql(s"CREATE TABLE $t (id int) $defaultUsing")
-      checkHiveClientCalls(expected = 10) {
+      // Drop existing table: 3 Hive client calls
+      // 1. tableExists (in DropTableExec to check if table exists)
+      // 2. getTable (in loadTable -> getTableRawMetadata to get table metadata)
+      // 3. dropTable (the actual drop operation)
+      checkHiveClientCalls(expected = 3) {
         sql(s"DROP TABLE $t")
       }
     }
+
+    withNamespace(s"$catalog.ns") {
+      sql(s"CREATE NAMESPACE $catalog.ns")
+      // Drop non-existent table with IF EXISTS: 1 Hive client call
+      // 1. tableExists (returns false, IF EXISTS allows silent return)
+      checkHiveClientCalls(expected = 1) {
+        sql(s"DROP TABLE IF EXISTS $catalog.ns.tbl")
+      }
+      // Drop non-existent table without IF EXISTS: 1 Hive client call
+      // 1. tableExists (returns false, throws TABLE_OR_VIEW_NOT_FOUND)
+      checkHiveClientCalls(expected = 1) {
+        intercept[AnalysisException] {
+          sql(s"DROP TABLE $catalog.ns.tbl")
+        }
+      }
+    }
+
+    // Drop table in non-existent database with IF EXISTS: 1 Hive client call
+    // 1. tableExists (returns false, IF EXISTS allows silent return)
+    checkHiveClientCalls(expected = 1) {
+      sql(s"DROP TABLE IF EXISTS $catalog.non_existent_db.tbl")
+    }
+
+    // Drop table in non-existent database without IF EXISTS: 1 Hive client call
+    // 1. tableExists (returns false, throws TABLE_OR_VIEW_NOT_FOUND)
+    checkHiveClientCalls(expected = 1) {
+      intercept[AnalysisException] {
+        sql(s"DROP TABLE $catalog.non_existent_db.tbl")
+      }
+    }
   }
 }
diff --git a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowPartitionsSuite.scala b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowPartitionsSuite.scala
index c3d9790e1f6e..73d1918886ed 100644
--- a/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowPartitionsSuite.scala
+++ b/sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/command/ShowPartitionsSuite.scala
@@ -42,7 +42,7 @@ class ShowPartitionsSuite extends v1.ShowPartitionsSuiteBase with CommandSuiteBa
   test("hive client calls") {
     withNamespaceAndTable("ns", "dateTable") { t =>
       createDateTable(t)
-      checkHiveClientCalls(expected = 10) {
+      checkHiveClientCalls(expected = 6) {
         sql(s"SHOW PARTITIONS $t")
       }
     }


---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]
