This is an automated email from the ASF dual-hosted git repository.

dengzh pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/hive.git


The following commit(s) were added to refs/heads/master by this push:
     new af8501a8d04 HIVE-29280: Drop deprecated methods from Metastore server 
side (#6148)
af8501a8d04 is described below

commit af8501a8d04602c0e88402a55b9b6afcb1129590
Author: dengzh <[email protected]>
AuthorDate: Thu Nov 6 11:19:37 2025 +0800

    HIVE-29280: Drop deprecated methods from Metastore server side (#6148)
---
 .../hcatalog/listener/NotificationListener.java    |   6 +-
 .../hcatalog/listener/DummyRawStoreFailEvent.java  |  11 -
 .../AuthorizationPreEventListener.java             |   7 +-
 .../apache/hadoop/hive/metastore/AlterHandler.java |  92 ----
 .../apache/hadoop/hive/metastore/HMSHandler.java   | 171 +++-----
 .../hadoop/hive/metastore/HiveAlterHandler.java    |  19 -
 .../apache/hadoop/hive/metastore/IHMSHandler.java  |  20 -
 .../apache/hadoop/hive/metastore/ObjectStore.java  | 486 ++-------------------
 .../org/apache/hadoop/hive/metastore/RawStore.java | 338 +-------------
 .../metastore/TransactionalValidationListener.java |   5 +-
 .../hadoop/hive/metastore/cache/CachedStore.java   | 216 +--------
 .../client/builder/GetPartitionsArgs.java          |   4 +
 .../hive/metastore/utils/MetaStoreServerUtils.java |   4 -
 .../hadoop/hive/metastore/TestObjectStore.java     | 121 ++---
 .../hive/metastore/cache/TestCachedStore.java      |  43 +-
 15 files changed, 187 insertions(+), 1356 deletions(-)

diff --git 
a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
index efafe0c641e..4c1b0e1819e 100644
--- 
a/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
+++ 
b/hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/NotificationListener.java
@@ -36,6 +36,7 @@
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hive.metastore.IHMSHandler;
 import org.apache.hadoop.hive.metastore.MetaStoreEventListener;
+import org.apache.hadoop.hive.metastore.api.GetTableRequest;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
 import org.apache.hadoop.hive.metastore.api.Table;
@@ -208,8 +209,9 @@ public void onCreateTable(CreateTableEvent tableEvent) 
throws MetaException {
       Configuration conf = handler.getConf();
       Table newTbl;
       try {
-        newTbl = handler.get_table_core(tbl.getCatName(), tbl.getDbName(), 
tbl.getTableName())
-          .deepCopy();
+        GetTableRequest getTableRequest = new GetTableRequest(tbl.getDbName(), 
tbl.getTableName());
+        getTableRequest.setCatName(tbl.getCatName());
+        newTbl = handler.get_table_core(getTableRequest).deepCopy();
         newTbl.getParameters().put(
           HCatConstants.HCAT_MSGBUS_TOPIC_NAME,
           getTopicPrefix(conf) + "." + newTbl.getDbName().toLowerCase() + "."
diff --git 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
index 0fc4e346433..f47b4d33f40 100644
--- 
a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
+++ 
b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/DummyRawStoreFailEvent.java
@@ -103,17 +103,6 @@ public boolean dropTable(String catName, String dbName, 
String tableName)
     }
   }
 
-  @Override
-  public boolean dropPartition(String catName, String dbName, String 
tableName, List<String> partVals)
-      throws MetaException, NoSuchObjectException,
-      InvalidObjectException, InvalidInputException {
-    if (shouldEventSucceed) {
-      return super.dropPartition(catName, dbName, tableName, partVals);
-    } else {
-      throw new RuntimeException("Event failed.");
-    }
-  }
-
   @Override
   public boolean dropPartition(String catName, String dbName, String 
tableName, String partName)
       throws MetaException, NoSuchObjectException,
diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
index 2cc057ee6e8..7b7b623e458 100644
--- 
a/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
+++ 
b/ql/src/java/org/apache/hadoop/hive/ql/security/authorization/AuthorizationPreEventListener.java
@@ -23,6 +23,8 @@
 
 import com.google.common.base.Function;
 import com.google.common.collect.Iterators;
+
+import org.apache.hadoop.hive.metastore.api.GetTableRequest;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -466,8 +468,9 @@ public 
PartitionWrapper(org.apache.hadoop.hive.metastore.api.Partition mapiPart,
       org.apache.hadoop.hive.metastore.api.Partition wrapperApiPart = 
mapiPart.deepCopy();
       String catName = mapiPart.isSetCatName() ? mapiPart.getCatName() :
           MetaStoreUtils.getDefaultCatalog(context.getHandler().getConf());
-      org.apache.hadoop.hive.metastore.api.Table t = 
context.getHandler().get_table_core(
-          catName, mapiPart.getDbName(), mapiPart.getTableName());
+      GetTableRequest getTableRequest = new 
GetTableRequest(mapiPart.getDbName(), mapiPart.getTableName());
+      getTableRequest.setCatName(catName);
+      org.apache.hadoop.hive.metastore.api.Table t = 
context.getHandler().get_table_core(getTableRequest);
       if (wrapperApiPart.getSd() == null){
         // In the cases of create partition, by the time this event fires, the 
partition
         // object has not yet come into existence, and thus will not yet have a
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
index 2595da5e42e..d5da8b21759 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/AlterHandler.java
@@ -34,39 +34,6 @@
  */
 public interface AlterHandler extends Configurable {
 
-  /**
-   * @deprecated As of release 2.2.0. Replaced by {@link #alterTable(RawStore, 
Warehouse, String,
-   * String, String, Table, EnvironmentContext, IHMSHandler, String)}
-   *
-   * handles alter table, the changes could be cascaded to partitions if 
applicable
-   *
-   * @param msdb
-   *          object to get metadata
-   * @param wh
-   *          Hive Warehouse where table data is stored
-   * @param catName
-   *          catalog of the table being altered
-   * @param dbname
-   *          database of the table being altered
-   * @param name
-   *          original name of the table being altered. same as
-   *          <i>newTable.tableName</i> if alter op is not a rename.
-   * @param newTable
-   *          new table object
-   * @param envContext
-   *          environment context variable
-   * @throws InvalidOperationException
-   *           thrown if the newTable object is invalid
-   * @throws MetaException
-   *           thrown if there is any other error
-   */
-  @Deprecated
-  default void alterTable(RawStore msdb, Warehouse wh, String catName, String 
dbname,
-    String name, Table newTable, EnvironmentContext envContext)
-      throws InvalidOperationException, MetaException {
-    alterTable(msdb, wh, catName, dbname, name, newTable, envContext, null, 
null);
-  }
-
   /**
    * handles alter table, the changes could be cascaded to partitions if 
applicable
    *
@@ -96,37 +63,6 @@ void alterTable(RawStore msdb, Warehouse wh, String catName, 
String dbname,
       IHMSHandler handler,  String writeIdList)
           throws InvalidOperationException, MetaException;
 
-  /**
-   * @deprecated As of release 2.2.0.  Replaced by {@link 
#alterPartitions(RawStore, Warehouse, String,
-   * String, String, List, EnvironmentContext, String, long, IHMSHandler)}
-   *
-   * handles alter partition
-   *
-   * @param msdb
-   *          object to get metadata
-   * @param wh
-   *          physical warehouse class
-   * @param dbname
-   *          database of the partition being altered
-   * @param name
-   *          table of the partition being altered
-   * @param part_vals
-   *          original values of the partition being altered
-   * @param new_part
-   *          new partition object
-   * @return the altered partition
-   * @throws InvalidOperationException thrown if the operation is invalid
-   * @throws InvalidObjectException thrown if the new_part object is invalid
-   * @throws AlreadyExistsException thrown if the new_part object already 
exists
-   * @throws MetaException thrown if there is any other error
-   * @throws NoSuchObjectException thrown if there is no such object
-   */
-  @Deprecated
-  Partition alterPartition(final RawStore msdb, Warehouse wh, final String 
dbname,
-    final String name, final List<String> part_vals, final Partition new_part,
-    EnvironmentContext environmentContext)
-      throws InvalidOperationException, InvalidObjectException, 
AlreadyExistsException, MetaException, NoSuchObjectException;
-
   /**
    * handles alter partition
    *
@@ -159,34 +95,6 @@ Partition alterPartition(final RawStore msdb, Warehouse wh, 
final String catName
                            IHMSHandler handler,  String validWriteIds)
       throws InvalidOperationException, InvalidObjectException, 
AlreadyExistsException, MetaException, NoSuchObjectException;
 
-  /**
-   * @deprecated As of release 3.0.0. Replaced by {@link 
#alterPartitions(RawStore, Warehouse, String,
-   * String, String, List, EnvironmentContext, String, long, IHMSHandler)}
-   *
-   * handles alter partitions
-   *
-   * @param msdb
-   *          object to get metadata
-   * @param wh physical warehouse class
-   * @param dbname
-   *          database of the partition being altered
-   * @param name
-   *          table of the partition being altered
-   * @param new_parts
-   *          new partition list
-   * @param environmentContext environment context variable
-   * @return the altered partition list
-   * @throws InvalidOperationException thrown if the operation is invalid
-   * @throws InvalidObjectException thrown if the new_parts object is invalid
-   * @throws AlreadyExistsException thrown if the new_part object already 
exists
-   * @throws MetaException thrown if there is any other error
-   */
-  @Deprecated
-  List<Partition> alterPartitions(final RawStore msdb, Warehouse wh,
-    final String dbname, final String name, final List<Partition> new_parts,
-    EnvironmentContext environmentContext)
-      throws InvalidOperationException, InvalidObjectException, 
AlreadyExistsException, MetaException;
-
   /**
    * handles alter partitions
    * @param msdb object to get metadata
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
index 423dffb455f..7ab569383af 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HMSHandler.java
@@ -2283,52 +2283,6 @@ public Table translate_table_dryrun(final 
CreateTableRequest req) throws Already
     return transformedTbl != null ? transformedTbl : tbl;
   }
 
-  private void create_table_core(final RawStore ms, final Table tbl,
-                                 final EnvironmentContext envContext)
-      throws AlreadyExistsException, MetaException,
-      InvalidObjectException, NoSuchObjectException, InvalidInputException {
-    CreateTableRequest req = new CreateTableRequest(tbl);
-    req.setEnvContext(envContext);
-    create_table_core(ms, req);
-  }
-
-  private void create_table_core(final RawStore ms, final Table tbl,
-                                 final EnvironmentContext envContext, 
List<SQLPrimaryKey> primaryKeys,
-                                 List<SQLForeignKey> foreignKeys, 
List<SQLUniqueConstraint> uniqueConstraints,
-                                 List<SQLNotNullConstraint> 
notNullConstraints, List<SQLDefaultConstraint> defaultConstraints,
-                                 List<SQLCheckConstraint> checkConstraints,
-                                 List<String> processorCapabilities, String 
processorIdentifier)
-      throws AlreadyExistsException, MetaException,
-      InvalidObjectException, NoSuchObjectException, InvalidInputException {
-    CreateTableRequest req = new CreateTableRequest(tbl);
-    if (envContext != null) {
-      req.setEnvContext(envContext);
-    }
-    if (primaryKeys != null) {
-      req.setPrimaryKeys(primaryKeys);
-    }
-    if (foreignKeys != null) {
-      req.setForeignKeys(foreignKeys);
-    }
-    if (uniqueConstraints != null) {
-      req.setUniqueConstraints(uniqueConstraints);
-    }
-    if (notNullConstraints != null) {
-      req.setNotNullConstraints(notNullConstraints);
-    }
-    if (defaultConstraints != null) {
-      req.setDefaultConstraints(defaultConstraints);
-    }
-    if (checkConstraints != null) {
-      req.setCheckConstraints(checkConstraints);
-    }
-    if (processorCapabilities != null) {
-      req.setProcessorCapabilities(processorCapabilities);
-      req.setProcessorIdentifier(processorIdentifier);
-    }
-    create_table_core(ms, req);
-  }
-
   private void create_table_core(final RawStore ms, final CreateTableRequest 
req)
       throws AlreadyExistsException, MetaException,
       InvalidObjectException, NoSuchObjectException, InvalidInputException {
@@ -3516,7 +3470,9 @@ private void truncateTableInternal(String dbName, String 
tableName, List<String>
     boolean isSkipTrash = false, needCmRecycle = false;
     try {
       String[] parsedDbName = parseDbName(dbName, conf);
-      Table tbl = get_table_core(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tableName);
+      GetTableRequest getTableRequest = new 
GetTableRequest(parsedDbName[DB_NAME], tableName);
+      getTableRequest.setCatName(parsedDbName[CAT_NAME]);
+      Table tbl = get_table_core(getTableRequest);
 
       boolean skipDataDeletion = Optional.ofNullable(context)
           .map(EnvironmentContext::getProperties)
@@ -3527,7 +3483,7 @@ private void truncateTableInternal(String dbName, String 
tableName, List<String>
       if (partNames == null) {
         if (0 != tbl.getPartitionKeysSize()) {
           partitionsList = getMS().getPartitions(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME],
-              tableName, -1);
+              tableName, GetPartitionsArgs.getAllPartitions());
         }
       } else {
         partitionsList = getMS().getPartitionsByNames(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME],
@@ -3756,32 +3712,6 @@ public List<TableMeta> get_table_meta(String dbnames, 
String tblNames, List<Stri
     return t;
   }
 
-  @Override
-  @Deprecated
-  public Table get_table_core(
-      final String catName,
-      final String dbname,
-      final String name)
-      throws MetaException, NoSuchObjectException {
-    GetTableRequest getTableRequest = new GetTableRequest(dbname,name);
-    getTableRequest.setCatName(catName);
-    return get_table_core(getTableRequest);
-  }
-
-  @Override
-  @Deprecated
-  public Table get_table_core(
-      final String catName,
-      final String dbname,
-      final String name,
-      final String writeIdList)
-      throws MetaException, NoSuchObjectException {
-    GetTableRequest getTableRequest = new GetTableRequest(dbname,name);
-    getTableRequest.setCatName(catName);
-    getTableRequest.setValidWriteIdList(writeIdList);
-    return get_table_core(getTableRequest);
-  }
-
   /**
    * This function retrieves table from metastore. If getColumnStats flag is 
true,
    * then engine should be specified so the table is retrieve with the column 
stats
@@ -5260,7 +5190,9 @@ public DropPartitionsResult drop_partitions_req(
       ms.openTransaction();
       // We need Partition-s for firing events and for result; DN needs 
MPartition-s to drop.
       // Great... Maybe we could bypass fetching MPartitions by issuing direct 
SQL deletes.
-      tbl = get_table_core(catName, dbName, tblName);
+      GetTableRequest getTableRequest = new GetTableRequest(dbName, tblName);
+      getTableRequest.setCatName(catName);
+      tbl = get_table_core(getTableRequest);
       mustPurge = isMustPurge(envContext, tbl);
       tableDataShouldBeDeleted = checkTableDataShouldBeDeleted(tbl, 
deleteData);
       writeId = getWriteId(envContext);
@@ -5785,7 +5717,9 @@ public List<PartitionSpec> get_partitions_pspec(final 
String db_name, final Stri
 
     List<PartitionSpec> partitionSpecs = null;
     try {
-      Table table = get_table_core(catName, dbName, tableName);
+      GetTableRequest getTableRequest = new GetTableRequest(dbName, tableName);
+      getTableRequest.setCatName(catName);
+      Table table = get_table_core(getTableRequest);
       // get_partitions will parse out the catalog and db names itself
       List<Partition> partitions = get_partitions(db_name, tableName,
           new 
GetPartitionsArgs.GetPartitionsArgsBuilder().max(max_parts).build());
@@ -5829,7 +5763,9 @@ public GetPartitionsResponse 
get_partitions_with_specs(GetPartitionsRequest requ
     GetPartitionsResponse response = null;
     Exception ex = null;
     try {
-      Table table = get_table_core(catName, parsedDbName[DB_NAME], tableName);
+      GetTableRequest getTableRequest = new 
GetTableRequest(parsedDbName[DB_NAME], tableName);
+      getTableRequest.setCatName(catName);
+      Table table = get_table_core(getTableRequest);
       List<Partition> partitions = getMS()
           .getPartitionSpecsByFilterAndProjection(table, 
request.getProjectionSpec(),
               request.getFilterSpec());
@@ -6460,7 +6396,9 @@ private List<FieldSchema> 
get_fields_with_environment_context_core(String db, St
     Exception ex = null;
     try {
       try {
-        tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
base_table_name);
+        GetTableRequest getTableRequest = new 
GetTableRequest(parsedDbName[DB_NAME], base_table_name);
+        getTableRequest.setCatName(parsedDbName[CAT_NAME]);
+        tbl = get_table_core(getTableRequest);
         firePreEvent(new PreReadTableEvent(tbl, this));
       } catch (NoSuchObjectException e) {
         throw new UnknownTableException(e.getMessage());
@@ -6560,7 +6498,9 @@ private List<FieldSchema> 
get_schema_with_environment_context_core(String db, St
 
       Table tbl;
       try {
-        tbl = get_table_core(parsedDbName[CAT_NAME], parsedDbName[DB_NAME], 
base_table_name);
+        GetTableRequest getTableRequest = new 
GetTableRequest(parsedDbName[DB_NAME], base_table_name);
+        getTableRequest.setCatName(parsedDbName[CAT_NAME]);
+        tbl = get_table_core(getTableRequest);
       } catch (NoSuchObjectException e) {
         throw new UnknownTableException(e.getMessage());
       }
@@ -6757,21 +6697,6 @@ public Partition 
append_partition_by_name_with_environment_context(final String
     return ret;
   }
 
-  private boolean drop_partition_by_name_core(final RawStore ms, final String 
catName,
-      final String db_name, final String tbl_name, final String part_name,
-      final boolean deleteData, final EnvironmentContext envContext)
-      throws TException, IOException {
-
-    List<String> partVals;
-    try {
-      partVals = getPartValsFromName(ms, catName, db_name, tbl_name, 
part_name);
-    } catch (InvalidObjectException e) {
-      throw new NoSuchObjectException(e.getMessage());
-    }
-
-    return drop_partition_common(ms, catName, db_name, tbl_name, partVals, 
deleteData, envContext);
-  }
-
   @Deprecated
   @Override
   public boolean drop_partition_by_name(final String db_name, final String 
tbl_name,
@@ -7530,7 +7455,9 @@ public List<PartitionSpec> get_part_specs_by_filter(final 
String dbName, final S
             maxParts, filter);
     List<PartitionSpec> partitionSpecs = null;
     try {
-      Table table = get_table_core(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], tblName);
+      GetTableRequest getTableRequest = new 
GetTableRequest(parsedDbName[DB_NAME], tblName);
+      getTableRequest.setCatName(parsedDbName[CAT_NAME]);
+      Table table = get_table_core(getTableRequest);
       // Don't pass the parsed db name, as get_partitions_by_filter will parse 
it itself
       List<Partition> partitions = get_partitions_by_filter(dbName, tblName, 
filter, (short) maxParts);
 
@@ -7572,7 +7499,10 @@ public PartitionsSpecByExprResult 
get_partitions_spec_by_expr(
               .includeParamKeyPattern(req.getIncludeParamKeyPattern())
               .excludeParamKeyPattern(req.getExcludeParamKeyPattern())
               .build());
-      Table table = get_table_core(catName, dbName, tblName);
+
+      GetTableRequest getTableRequest = new GetTableRequest(dbName, tblName);
+      getTableRequest.setCatName(catName);
+      Table table = get_table_core(getTableRequest);
       List<PartitionSpec> partitionSpecs =
           
MetaStoreServerUtils.getPartitionspecsGroupedByStorageDescriptor(table, 
result.getPartitions());
       ret = new PartitionsSpecByExprResult(partitionSpecs, 
result.isHasUnknownPartitions());
@@ -7847,8 +7777,9 @@ private String getPartName(HiveObjectRef hiveObject) 
throws MetaException {
       try {
         String catName = hiveObject.isSetCatName() ? hiveObject.getCatName() :
             getDefaultCatalog(conf);
-        Table table = get_table_core(catName, hiveObject.getDbName(), 
hiveObject
-            .getObjectName());
+        GetTableRequest getTableRequest = new 
GetTableRequest(hiveObject.getDbName(), hiveObject.getObjectName());
+        getTableRequest.setCatName(catName);
+        Table table = get_table_core(getTableRequest);
         partName = Warehouse
             .makePartName(table.getPartitionKeys(), partValue);
       } catch (NoSuchObjectException e) {
@@ -8285,7 +8216,9 @@ private List<HiveObjectPrivilege> 
list_partition_column_privileges(
       if (dbName == null) {
         return getMS().listPrincipalPartitionColumnGrantsAll(principalName, 
principalType);
       }
-      Table tbl = get_table_core(catName, dbName, tableName);
+      GetTableRequest getTableRequest = new GetTableRequest(dbName, tableName);
+      getTableRequest.setCatName(catName);
+      Table tbl = get_table_core(getTableRequest);
       String partName = Warehouse.makePartName(tbl.getPartitionKeys(), 
partValues);
       if (principalName == null) {
         return getMS().listPartitionColumnGrantsAll(catName, dbName, 
tableName, partName, columnName);
@@ -8344,7 +8277,9 @@ private List<HiveObjectPrivilege> 
list_partition_privileges(
       if (dbName == null) {
         return getMS().listPrincipalPartitionGrantsAll(principalName, 
principalType);
       }
-      Table tbl = get_table_core(catName, dbName, tableName);
+      GetTableRequest getTableRequest = new GetTableRequest(dbName, tableName);
+      getTableRequest.setCatName(catName);
+      Table tbl = get_table_core(getTableRequest);
       String partName = Warehouse.makePartName(tbl.getPartitionKeys(), 
partValues);
       if (principalName == null) {
         return getMS().listPartitionGrantsAll(catName, dbName, tableName, 
partName);
@@ -8855,23 +8790,13 @@ private void validateForAlterFunction(String dbName, 
String funcName, Function n
   @Override
   public List<String> get_functions(String dbName, String pattern)
       throws MetaException {
-    startFunction("get_functions", ": db=" + dbName + " pat=" + pattern);
-
-    RawStore ms = getMS();
-    Exception ex = null;
-    List<String> funcNames = null;
     String[] parsedDbName = parseDbName(dbName, conf);
-
-    try {
-      funcNames = ms.getFunctions(parsedDbName[CAT_NAME], 
parsedDbName[DB_NAME], pattern);
-    } catch (Exception e) {
-      ex = e;
-      throw newMetaException(e);
-    } finally {
-      endFunction("get_functions", funcNames != null, ex);
-    }
-
-    return funcNames;
+    GetFunctionsRequest request = new 
GetFunctionsRequest(parsedDbName[DB_NAME]);
+    request.setCatalogName(parsedDbName[CAT_NAME]);
+    request.setPattern(pattern);
+    request.setReturnNames(true);
+    GetFunctionsResponse resp = get_functions_req(request);
+    return resp.getFunction_names();
   }
 
   @Override
@@ -8882,20 +8807,22 @@ public GetFunctionsResponse 
get_functions_req(GetFunctionsRequest req)
 
     RawStore ms = getMS();
     Exception ex = null;
-    List<Function> funcs = null;
+    GetFunctionsResponse response = new GetFunctionsResponse();
     String catName = req.isSetCatalogName() ? req.getCatalogName() : 
getDefaultCatalog(conf);
     try {
-      funcs = ms.getFunctionsRequest(catName, req.getDbName(),
+      List result = ms.getFunctionsRequest(catName, req.getDbName(),
           req.getPattern(), req.isReturnNames());
+      if (req.isReturnNames()) {
+        response.setFunction_names(result);
+      } else {
+        response.setFunctions(result);
+      }
     } catch (Exception e) {
       ex = e;
       throw newMetaException(e);
     } finally {
-      endFunction("get_functions", funcs != null, ex);
+      endFunction("get_functions", ex == null, ex);
     }
-    GetFunctionsResponse response = new GetFunctionsResponse();
-    response.setFunctions(funcs);
-
     return response;
   }
 
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
index 36af8abc04d..1afefb7c404 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java
@@ -556,15 +556,6 @@ String getSimpleMessage(Exception ex) {
     return ex.getMessage();
   }
 
-  @Override
-  public Partition alterPartition(final RawStore msdb, Warehouse wh, final 
String dbname,
-    final String name, final List<String> part_vals, final Partition new_part,
-    EnvironmentContext environmentContext)
-      throws InvalidOperationException, InvalidObjectException, 
AlreadyExistsException, MetaException, NoSuchObjectException {
-    return alterPartition(msdb, wh, MetaStoreUtils.getDefaultCatalog(conf), 
dbname, name, part_vals, new_part,
-        environmentContext, null, null);
-  }
-
   @Override
   public Partition alterPartition(RawStore msdb, Warehouse wh, String catName, 
String dbname,
       String name, List<String> part_vals, final Partition new_part,
@@ -798,16 +789,6 @@ public Partition alterPartition(RawStore msdb, Warehouse 
wh, String catName, Str
     return oldPart;
   }
 
-  @Deprecated
-  @Override
-  public List<Partition> alterPartitions(final RawStore msdb, Warehouse wh, 
final String dbname,
-    final String name, final List<Partition> new_parts,
-    EnvironmentContext environmentContext)
-      throws InvalidOperationException, InvalidObjectException, 
AlreadyExistsException, MetaException {
-    return alterPartitions(msdb, wh, MetaStoreUtils.getDefaultCatalog(conf), 
dbname, name, new_parts,
-        environmentContext, null, -1, null);
-  }
-
   private Map<List<String>, Partition> getExistingPartitions(final RawStore 
msdb,
       final List<Partition> new_parts, final Table tbl, final String catName,
       final String dbname, final String name)
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
index cbdd311dd1b..36fc6fb6d03 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/IHMSHandler.java
@@ -74,26 +74,6 @@ public interface IHMSHandler extends 
ThriftHiveMetastore.Iface, Configurable {
   Database get_database_core(final String catName, final String name)
       throws NoSuchObjectException, MetaException;
 
-  /**
-   * Equivalent of get_table, but does not log audits and fire pre-event 
listener.
-   * Meant to be used for calls made by other hive classes, that are not using 
the
-   * thrift interface.
-   * @param catName catalog name
-   * @param dbname database name
-   * @param name table name
-   * @return Table object
-   * @throws NoSuchObjectException If the table does not exist.
-   * @throws MetaException  If another error occurs.
-   */
-  @Deprecated
-  Table get_table_core(final String catName, final String dbname, final String 
name)
-      throws MetaException, NoSuchObjectException;
-  @Deprecated
-  Table get_table_core(final String catName, final String dbname,
-                       final String name,
-                       final String writeIdList)
-      throws MetaException, NoSuchObjectException;
-
   /**
    *
    * @param getTableRequest request object to query table in HMS
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
index 965ff0edc16..2da8955d5dd 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/ObjectStore.java
@@ -26,7 +26,6 @@
 
 import java.io.IOException;
 import java.net.InetAddress;
-import java.nio.ByteBuffer;
 import java.sql.Connection;
 import java.sql.SQLException;
 import java.sql.SQLIntegrityConstraintViolationException;
@@ -101,7 +100,6 @@
 import org.apache.hadoop.hive.metastore.api.DatabaseType;
 import org.apache.hadoop.hive.metastore.api.DataConnector;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.FileMetadataExprType;
 import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
 import org.apache.hadoop.hive.metastore.api.Function;
 import org.apache.hadoop.hive.metastore.api.FunctionType;
@@ -249,7 +247,6 @@
 import org.apache.hadoop.hive.metastore.model.MReplicationMetrics;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree;
 import org.apache.hadoop.hive.metastore.parser.ExpressionTree.FilterBuilder;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.properties.CachingPropertyStore;
 import org.apache.hadoop.hive.metastore.properties.PropertyStore;
 import org.apache.hadoop.hive.metastore.tools.SQLGenerator;
@@ -1247,51 +1244,6 @@ public boolean dropDataConnector(String dcname)
     return success;
   }
 
-  /*
-  public DataConnector getDataConnectorInternal(String name)
-      throws MetaException, NoSuchObjectException {
-    return new GetDcHelper(name, true, true) {
-      @Override
-      protected DataConnector getSqlResult(GetHelper<DataConnector> ctx) 
throws MetaException {
-        try {
-        return getJDODataConnector(name);
-      }
-
-      @Override
-      protected DataConnector getJdoResult(GetHelper<DataConnector> ctx) 
throws MetaException, NoSuchObjectException {
-        return getJDODataConnector(name);
-      }
-    }.run(false);
-  }
-
-  private DataConnector getDataConnectorInternal(String name) throws 
NoSuchObjectException {
-    MDataConnector mdc = null;
-    boolean commited = false;
-    try {
-      openTransaction();
-      mdc = getMDataConnector(name);
-      commited = commitTransaction();
-    } finally {
-      if (!commited) {
-        rollbackTransaction();
-      }
-    }
-    DataConnector connector = new DataConnector();
-    connector.setName(mdc.getName());
-    connector.setType(mdc.getType());
-    connector.setUrl(mdc.getUrl());
-    connector.setDescription(mdc.getDescription());
-    connector.setParameters(convertMap(mdc.getParameters()));
-    connector.setOwnerName(mdc.getOwnerName());
-    String type = 
org.apache.commons.lang3.StringUtils.defaultIfBlank(mdc.getOwnerType(), null);
-    PrincipalType principalType = (type == null) ? null : 
PrincipalType.valueOf(type);
-    connector.setOwnerType(principalType);
-    connector.setCreateTime(mdc.getCreateTime());
-    return connector;
-  }
-   */
-
-
   private MType getMType(Type type) {
     List<MFieldSchema> fields = new ArrayList<>();
     if (type.getFields() != null) {
@@ -1518,7 +1470,7 @@ public boolean dropTable(String catName, String dbName, 
String tableName)
         }
         // delete column statistics if present
         try {
-          deleteTableColumnStatistics(catName, dbName, tableName, (String) 
null, null);
+          deleteTableColumnStatistics(catName, dbName, tableName, null, null);
         } catch (NoSuchObjectException e) {
           LOG.info("Found no table level column statistics associated with {} 
to delete",
               TableName.getQualified(catName, dbName, tableName));
@@ -2759,80 +2711,6 @@ protected String describeResult() {
     }
   }
 
-  private boolean isValidPartition(
-      Partition part, List<FieldSchema> partitionKeys, boolean ifNotExists) 
throws MetaException {
-    MetaStoreServerUtils.validatePartitionNameCharacters(part.getValues(), 
conf);
-    boolean doesExist = doesPartitionExist(part.getCatName(),
-        part.getDbName(), part.getTableName(), partitionKeys, 
part.getValues());
-    if (doesExist && !ifNotExists) {
-      throw new MetaException("Partition already exists: " + part);
-    }
-    return !doesExist;
-  }
-
-  @Override
-  public boolean addPartitions(String catName, String dbName, String tblName,
-                               PartitionSpecProxy partitionSpec, boolean 
ifNotExists)
-      throws InvalidObjectException, MetaException {
-    boolean success = false;
-    openTransaction();
-    try {
-      List<MTablePrivilege> tabGrants = null;
-      List<MTableColumnPrivilege> tabColumnGrants = null;
-      MTable table = this.getMTable(catName, dbName, tblName);
-      if (table == null) {
-        throw new InvalidObjectException("Unable to add partitions because "
-            + TableName.getQualified(catName, dbName, tblName) +
-            " does not exist");
-      }
-      if 
("TRUE".equalsIgnoreCase(table.getParameters().get("PARTITION_LEVEL_PRIVILEGE")))
 {
-        tabGrants = this.listAllTableGrants(catName, dbName, tblName);
-        tabColumnGrants = this.listTableAllColumnGrants(catName, dbName, 
tblName);
-      }
-
-      if (!partitionSpec.getTableName().equals(tblName) || 
!partitionSpec.getDbName().equals(dbName)) {
-        throw new MetaException("Partition does not belong to target table "
-            + dbName + "." + tblName + ": " + partitionSpec);
-      }
-
-      PartitionSpecProxy.PartitionIterator iterator = 
partitionSpec.getPartitionIterator();
-
-      int now = (int) (System.currentTimeMillis() / 1000);
-
-      List<FieldSchema> partitionKeys = 
convertToFieldSchemas(table.getPartitionKeys());
-      while (iterator.hasNext()) {
-        Partition part = iterator.next();
-
-        if (isValidPartition(part, partitionKeys, ifNotExists)) {
-          MPartition mpart = convertToMPart(part, table);
-          pm.makePersistent(mpart);
-          if (tabGrants != null) {
-            for (MTablePrivilege tab : tabGrants) {
-              pm.makePersistent(new MPartitionPrivilege(tab.getPrincipalName(),
-                  tab.getPrincipalType(), mpart, tab.getPrivilege(), now,
-                  tab.getGrantor(), tab.getGrantorType(), tab.getGrantOption(),
-                  tab.getAuthorizer()));
-            }
-          }
-
-          if (tabColumnGrants != null) {
-            for (MTableColumnPrivilege col : tabColumnGrants) {
-              pm.makePersistent(new 
MPartitionColumnPrivilege(col.getPrincipalName(),
-                  col.getPrincipalType(), mpart, col.getColumnName(), 
col.getPrivilege(),
-                  now, col.getGrantor(), col.getGrantorType(), 
col.getGrantOption(),
-                  col.getAuthorizer()));
-            }
-          }
-        }
-      }
-
-      success = commitTransaction();
-    } finally {
-      rollbackAndCleanup(success, null);
-    }
-    return success;
-  }
-
   @Override
   public boolean addPartition(Partition part) throws InvalidObjectException,
       MetaException {
@@ -3057,22 +2935,6 @@ private Partition convertToPart(String catName, String 
dbName, String tblName,
     return p;
   }
 
-  @Override
-  public boolean dropPartition(String catName, String dbName, String tableName,
-    List<String> part_vals) throws MetaException, NoSuchObjectException, 
InvalidObjectException,
-    InvalidInputException {
-    boolean success = false;
-    try {
-      openTransaction();
-      MPartition part = getMPartition(catName, dbName, tableName, part_vals, 
null);
-      dropPartitionCommon(part);
-      success = commitTransaction();
-    } finally {
-      rollbackAndCleanup(success, null);
-    }
-    return success;
-  }
-
   @Override
   public boolean dropPartition(String catName, String dbName, String 
tableName, String partName)
       throws MetaException, NoSuchObjectException, InvalidObjectException, 
InvalidInputException {
@@ -3149,63 +3011,6 @@ public List<Void> run(List<String> input) throws 
MetaException {
     }
   }
 
-  /**
-   * Drop an MPartition and cascade deletes (e.g., delete partition privilege 
grants,
-   *   drop the storage descriptor cleanly, etc.)
-   */
-  private boolean dropPartitionCommon(MPartition part) throws MetaException,
-    InvalidObjectException, InvalidInputException {
-    boolean success = false;
-    try {
-      openTransaction();
-      if (part != null) {
-        List<MFieldSchema> schemas = part.getTable().getPartitionKeys();
-        List<String> colNames = new ArrayList<>();
-        for (MFieldSchema col: schemas) {
-          colNames.add(col.getName());
-        }
-        String partName = FileUtils.makePartName(colNames, part.getValues());
-
-        List<MPartitionPrivilege> partGrants = listPartitionGrants(
-            part.getTable().getDatabase().getCatalogName(),
-            part.getTable().getDatabase().getName(),
-            part.getTable().getTableName(),
-            Lists.newArrayList(partName));
-
-        if (CollectionUtils.isNotEmpty(partGrants)) {
-          pm.deletePersistentAll(partGrants);
-        }
-
-        List<MPartitionColumnPrivilege> partColumnGrants = 
listPartitionAllColumnGrants(
-            part.getTable().getDatabase().getCatalogName(),
-            part.getTable().getDatabase().getName(),
-            part.getTable().getTableName(),
-            Lists.newArrayList(partName));
-        if (CollectionUtils.isNotEmpty(partColumnGrants)) {
-          pm.deletePersistentAll(partColumnGrants);
-        }
-
-        String catName = part.getTable().getDatabase().getCatalogName();
-        String dbName = part.getTable().getDatabase().getName();
-        String tableName = part.getTable().getTableName();
-
-        // delete partition level column stats if it exists
-       try {
-          deletePartitionColumnStatistics(catName, dbName, tableName, 
partName, part.getValues(), null, null);
-        } catch (NoSuchObjectException e) {
-          LOG.info("No column statistics records found to delete");
-        }
-
-        preDropStorageDescriptor(part.getSd());
-        pm.deletePersistent(part);
-      }
-      success = commitTransaction();
-    } finally {
-      rollbackAndCleanup(success, null);
-    }
-    return success;
-  }
-
   @Override
   public List<Partition> getPartitions(String catName, String dbName, String 
tableName,
       GetPartitionsArgs args) throws MetaException, NoSuchObjectException {
@@ -3580,7 +3385,8 @@ private PartitionValuesResponse 
extractPartitionNamesByFilter(
     }
 
     if (partitionNames == null) {
-      partitions = getPartitionsByFilter(catName, dbName, tableName, filter, 
(short) maxParts);
+      partitions = getPartitionsByFilter(catName, dbName, tableName,
+          new 
GetPartitionsArgs.GetPartitionsArgsBuilder().filter(filter).max((short) 
maxParts).build());
     }
 
     if (partitions != null) {
@@ -4626,60 +4432,6 @@ protected Integer getJdoResult(
     }.run(false);
   }
 
-  @Override
-  public int getNumPartitionsByExpr(String catName, String dbName, String 
tblName,
-                                    byte[] expr) throws MetaException, 
NoSuchObjectException {
-    final ExpressionTree exprTree = 
PartFilterExprUtil.makeExpressionTree(expressionProxy, expr, null, conf);
-    final byte[] tempExpr = expr; // Need to be final to pass it to an inner 
class
-
-    catName = normalizeIdentifier(catName);
-    dbName = normalizeIdentifier(dbName);
-    tblName = normalizeIdentifier(tblName);
-    MTable mTable = ensureGetMTable(catName, dbName, tblName);
-    List<FieldSchema> partitionKeys = 
convertToFieldSchemas(mTable.getPartitionKeys());
-
-    return new GetHelper<Integer>(catName, dbName, tblName, true, true) {
-      private final SqlFilterForPushdown filter = new SqlFilterForPushdown();
-
-      @Override
-      protected String describeResult() {
-        return "Partition count";
-      }
-
-      @Override
-      protected boolean canUseDirectSql(GetHelper<Integer> ctx) throws 
MetaException {
-        return directSql.generateSqlFilterForPushdown(catName, dbName, 
tblName, partitionKeys, exprTree, null, filter);
-      }
-
-      @Override
-      protected Integer getSqlResult(GetHelper<Integer> ctx) throws 
MetaException {
-        return directSql.getNumPartitionsViaSqlFilter(filter);
-      }
-      @Override
-      protected Integer getJdoResult(
-          GetHelper<Integer> ctx) throws MetaException, NoSuchObjectException {
-        Integer numPartitions = null;
-
-        if (exprTree != null) {
-          try {
-            numPartitions = getNumPartitionsViaOrmFilter(catName ,dbName, 
tblName, exprTree, true, partitionKeys);
-          } catch (MetaException e) {
-            numPartitions = null;
-          }
-        }
-
-        // if numPartitions could not be obtained from ORM filters, then get 
number partitions names, and count them
-        if (numPartitions == null) {
-          List<String> filteredPartNames = new ArrayList<>();
-          getPartitionNamesPrunedByExprNoTxn(catName, dbName, tblName, 
partitionKeys, tempExpr, "", (short) -1, filteredPartNames);
-          numPartitions = filteredPartNames.size();
-        }
-
-        return numPartitions;
-      }
-    }.run(false);
-  }
-
   protected List<Partition> getPartitionsByFilterInternal(
       String catName, String dbName, String tblName,
       boolean allowSql, boolean allowJdo, GetPartitionsArgs args)
@@ -5944,8 +5696,11 @@ private List<SQLForeignKey> 
addForeignKeys(List<SQLForeignKey> foreignKeys, bool
           if (parentTable.getPartitionKeys() != null) {
             parentCols.addAll(parentTable.getPartitionKeys());
           }
-          existingTablePrimaryKeys = getPrimaryKeys(catName, pkTableDB, 
pkTableName);
-          existingTableUniqueConstraints = getUniqueConstraints(catName, 
pkTableDB, pkTableName);
+          PrimaryKeysRequest primaryKeysRequest = new 
PrimaryKeysRequest(pkTableDB, pkTableName);
+          primaryKeysRequest.setCatName(catName);
+          existingTablePrimaryKeys = getPrimaryKeys(primaryKeysRequest);
+          existingTableUniqueConstraints =
+              getUniqueConstraints(new UniqueConstraintsRequest(catName, 
pkTableDB, pkTableName));
         }
 
         // Here we build an aux structure that is used to verify that the 
foreign key that is declared
@@ -8449,33 +8204,6 @@ private List<MPartitionColumnPrivilege> 
listTableAllPartitionColumnGrants(
     return mSecurityColList;
   }
 
-  private List<MPartitionColumnPrivilege> listPartitionAllColumnGrants(
-      String catName, String dbName, String tableName, List<String> partNames) 
{
-    boolean success = false;
-    tableName = normalizeIdentifier(tableName);
-    dbName = normalizeIdentifier(dbName);
-    catName = normalizeIdentifier(catName);
-
-    List<MPartitionColumnPrivilege> mSecurityColList = null;
-    try {
-      openTransaction();
-      LOG.debug("Executing listPartitionAllColumnGrants");
-      mSecurityColList = queryByPartitionNames(catName,
-          dbName, tableName, partNames, MPartitionColumnPrivilege.class,
-          "partition.table.tableName", "partition.table.database.name", 
"partition.partitionName",
-          "partition.table.database.catalogName");
-      LOG.debug("Done executing query for listPartitionAllColumnGrants");
-      pm.retrieveAll(mSecurityColList);
-      success = commitTransaction();
-      LOG.debug("Done retrieving all objects for 
listPartitionAllColumnGrants");
-    } finally {
-      if (!success) {
-        rollbackTransaction();
-      }
-    }
-    return mSecurityColList;
-  }
-
   private void dropPartitionAllColumnGrantsNoTxn(
       String catName, String dbName, String tableName, List<String> partNames) 
{
     Pair<Query, Object[]> queryWithParams = makeQueryByPartitionNames(catName,
@@ -8547,30 +8275,6 @@ private List<MDCPrivilege> 
listDataConnectorGrants(String dcName, String authori
     }
   }
 
-  private List<MPartitionPrivilege> listPartitionGrants(String catName, String 
dbName, String tableName,
-      List<String> partNames) {
-    tableName = normalizeIdentifier(tableName);
-    dbName = normalizeIdentifier(dbName);
-
-    boolean success = false;
-    List<MPartitionPrivilege> mSecurityTabPartList = null;
-    try {
-      openTransaction();
-      LOG.debug("Executing listPartitionGrants");
-      mSecurityTabPartList = queryByPartitionNames(catName,
-          dbName, tableName, partNames, MPartitionPrivilege.class, 
"partition.table.tableName",
-          "partition.table.database.name", "partition.partitionName",
-          "partition.table.database.catalogName");
-      LOG.debug("Done executing query for listPartitionGrants");
-      pm.retrieveAll(mSecurityTabPartList);
-      success = commitTransaction();
-      LOG.debug("Done retrieving all objects for listPartitionGrants");
-    } finally {
-      rollbackAndCleanup(success, null);
-    }
-    return mSecurityTabPartList;
-  }
-
   private void dropPartitionGrantsNoTxn(String catName, String dbName, String 
tableName,
                                         List<String> partNames) {
     Pair<Query, Object[]> queryWithParams = makeQueryByPartitionNames(catName,
@@ -8582,18 +8286,6 @@ private void dropPartitionGrantsNoTxn(String catName, 
String dbName, String tabl
     }
   }
 
-  private <T> List<T> queryByPartitionNames(String catName, String dbName, 
String tableName,
-      List<String> partNames, Class<T> clazz, String tbCol, String dbCol, 
String partCol,
-      String catCol) {
-    Pair<Query, Object[]> queryAndParams = makeQueryByPartitionNames(catName,
-        dbName, tableName, partNames, clazz, tbCol, dbCol, partCol, catCol);
-    try (QueryWrapper wrapper = new QueryWrapper(queryAndParams.getLeft())) {
-      List<T> results = new ArrayList<T>(
-          (List) wrapper.executeWithArray(queryAndParams.getRight()));
-      return results;
-    }
-  }
-
   private Pair<Query, Object[]> makeQueryByPartitionNames(
       String catName, String dbName, String tableName, List<String> partNames, 
Class<?> clazz,
       String tbCol, String dbCol, String partCol, String catCol) {
@@ -10116,11 +9808,6 @@ protected String describeResult() {
     }.run(true);
   }
 
-  @Override
-  public void flushCache() {
-    // NOP as there's no caching
-  }
-
   private List<MPartitionColumnStatistics> getMPartitionColumnStatistics(Table 
table, List<String> partNames,
       List<String> colNames, String engine) throws MetaException {
     boolean committed = false;
@@ -10239,7 +9926,8 @@ protected Integer getSqlResult(GetHelper<Integer> ctx) 
throws MetaException {
         @Override
         protected Integer getJdoResult(GetHelper<Integer> ctx) throws 
MetaException, NoSuchObjectException {
           try {
-            List<Partition> parts = getPartitions(catName, dbName, tableName, 
-1);
+            List<Partition> parts = getPartitions(catName, dbName, tableName,
+                GetPartitionsArgs.getAllPartitions());
             for (Partition part : parts) {
               Partition newPart = new Partition(part);
               StatsSetupConst.clearColumnStatsState(newPart.getParameters());
@@ -11027,38 +10715,6 @@ private List<Function> getAllFunctionsViaJDO (String 
catName) {
     }
   }
 
-  @Override
-  public List<String> getFunctions(String catName, String dbName, String 
pattern) throws MetaException {
-    boolean commited = false;
-    Query query = null;
-    List<String> funcs = null;
-    try {
-      openTransaction();
-      dbName = normalizeIdentifier(dbName);
-      // Take the pattern and split it on the | to get all the composing
-      // patterns
-      List<String> parameterVals = new ArrayList<>();
-      StringBuilder filterBuilder = new StringBuilder();
-      appendSimpleCondition(filterBuilder, "database.name", new String[] { 
dbName }, parameterVals);
-      appendSimpleCondition(filterBuilder, "database.catalogName", new 
String[] {catName}, parameterVals);
-      if(pattern != null) {
-        appendPatternCondition(filterBuilder, "functionName", pattern, 
parameterVals);
-      }
-      query = pm.newQuery(MFunction.class, filterBuilder.toString());
-      query.setResult("functionName");
-      query.setOrdering("functionName ascending");
-      Collection names = (Collection) 
query.executeWithArray(parameterVals.toArray(new String[0]));
-      funcs = new ArrayList<>();
-      for (Iterator i = names.iterator(); i.hasNext();) {
-        funcs.add((String) i.next());
-      }
-      commited = commitTransaction();
-    } finally {
-      rollbackAndCleanup(commited, query);
-    }
-    return funcs;
-  }
-
   @Override
   public <T> List<T> getFunctionsRequest(String catName, String dbName, String 
pattern,
       boolean isReturnNames) throws MetaException {
@@ -11081,18 +10737,18 @@ public <T> List<T> getFunctionsRequest(String 
catName, String dbName, String pat
         query.setResult("functionName");
       }
       query.setOrdering("functionName ascending");
-
+      List<T> result;
       if (!isReturnNames) {
-        List<MFunction> functionList = (List<MFunction>) 
query.executeWithArray(parameterVals.toArray(new String[0]));
+        List<MFunction> functionList =
+            (List<MFunction>) query.executeWithArray(parameterVals.toArray(new 
String[0]));
         pm.retrieveAll(functionList);
-        commited = commitTransaction();
-        return (List<T>)convertToFunctions(functionList);
+        result = (List<T>) convertToFunctions(functionList);
       } else {
         List<String> functionList = (List<String>) 
query.executeWithArray(parameterVals.toArray(new String[0]));
-        pm.retrieveAll(functionList);
-        commited = commitTransaction();
-        return (List<T>)functionList;
+        result = (List<T>) new ArrayList<>(functionList);
       }
+      commited = commitTransaction();
+      return result;
     } finally {
       rollbackAndCleanup(commited, query);
     }
@@ -11783,40 +11439,6 @@ private NotificationEvent 
translateDbToThrift(MNotificationLog dbEvent) {
     return event;
   }
 
-  @Override
-  public boolean isFileMetadataSupported() {
-    return false;
-  }
-
-  @Override
-  public ByteBuffer[] getFileMetadata(List<Long> fileIds) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void putFileMetadata(
-      List<Long> fileIds, List<ByteBuffer> metadata, FileMetadataExprType 
type) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType 
type, byte[] expr,
-      ByteBuffer[] metadatas, ByteBuffer[] stripeBitsets, boolean[] 
eliminated) {
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) 
{
-    throw new UnsupportedOperationException();
-  }
-
-  @Override
-  public List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, 
String tbl_name) throws MetaException {
-    PrimaryKeysRequest request = new PrimaryKeysRequest(db_name, tbl_name);
-    request.setCatName(catName);
-    return getPrimaryKeys(request);
-  }
-
   @Override
   public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest request) throws 
MetaException {
     try {
@@ -11913,15 +11535,6 @@ private String getPrimaryKeyConstraintName(String 
catName, String dbName, String
      return ret;
    }
 
-  @Override
-  public List<SQLForeignKey> getForeignKeys(String catName, String 
parent_db_name, String parent_tbl_name,
-      String foreign_db_name, String foreign_tbl_name) throws MetaException {
-    ForeignKeysRequest request =
-        new ForeignKeysRequest(parent_db_name, parent_tbl_name, 
foreign_db_name, foreign_tbl_name);
-    request.setCatName(catName);
-    return getForeignKeys(request);
-  }
-
   @Override
   public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest request) throws 
MetaException {
     try {
@@ -12056,13 +11669,6 @@ private List<SQLForeignKey> 
getForeignKeysViaJdo(String catName, String parentDb
     return foreignKeys;
   }
 
-  @Override
-  public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String 
db_name, String tbl_name)
-      throws MetaException {
-    UniqueConstraintsRequest request = new UniqueConstraintsRequest(catName, 
db_name, tbl_name);
-    return getUniqueConstraints(request);
-  }
-
   @Override
   public List<SQLUniqueConstraint> 
getUniqueConstraints(UniqueConstraintsRequest request) throws MetaException {
     try {
@@ -12127,13 +11733,6 @@ private List<SQLUniqueConstraint> 
getUniqueConstraintsViaJdo(String catName, Str
     return uniqueConstraints;
   }
 
-  @Override
-  public List<SQLNotNullConstraint> getNotNullConstraints(String catName, 
String db_name, String tbl_name)
-      throws MetaException {
-    NotNullConstraintsRequest request = new NotNullConstraintsRequest(catName, 
db_name, tbl_name);
-    return getNotNullConstraints(request);
-  }
-
   @Override
   public List<SQLNotNullConstraint> 
getNotNullConstraints(NotNullConstraintsRequest request) throws MetaException {
     try {
@@ -12143,13 +11742,6 @@ public List<SQLNotNullConstraint> 
getNotNullConstraints(NotNullConstraintsReques
     }
   }
 
-  @Override
-  public List<SQLDefaultConstraint> getDefaultConstraints(String catName, 
String db_name, String tbl_name)
-      throws MetaException {
-     DefaultConstraintsRequest request = new 
DefaultConstraintsRequest(catName, db_name, tbl_name);
-     return getDefaultConstraints(request);
-  }
-
   @Override
   public List<SQLDefaultConstraint> 
getDefaultConstraints(DefaultConstraintsRequest request) throws MetaException {
     try {
@@ -12159,13 +11751,6 @@ public List<SQLDefaultConstraint> 
getDefaultConstraints(DefaultConstraintsReques
     }
   }
 
-  @Override
-  public List<SQLCheckConstraint> getCheckConstraints(String catName, String 
db_name, String tbl_name)
-      throws MetaException {
-    CheckConstraintsRequest request = new CheckConstraintsRequest(catName, 
db_name, tbl_name);
-    return  getCheckConstraints(request);
-  }
-
   @Override
   public List<SQLCheckConstraint> getCheckConstraints(CheckConstraintsRequest 
request) throws MetaException {
     try {
@@ -12343,22 +11928,6 @@ private List<SQLNotNullConstraint> 
getNotNullConstraintsViaJdo(String catName, S
     return notNullConstraints;
   }
 
-  /**
-   * Api to fetch all constraints at once
-   * @param catName catalog name
-   * @param dbName database name
-   * @param tblName table name
-   * @return list of all constraint for a given table
-   * @throws MetaException
-   */
-  @Override
-  @Deprecated
-  public SQLAllTableConstraints getAllTableConstraints(String catName, String 
dbName, String tblName)
-      throws MetaException,NoSuchObjectException {
-    AllTableConstraintsRequest request = new 
AllTableConstraintsRequest(dbName,tblName,catName);
-    return getAllTableConstraints(request);
-  }
-
   /**
    * Api to fetch all constraints at once
    * @param request request object
@@ -12375,12 +11944,21 @@ public SQLAllTableConstraints 
getAllTableConstraints(AllTableConstraintsRequest
     debugLog("Get all table constraints for the table - " + catName + "." + 
dbName + "." + tblName
         + " in class ObjectStore.java");
     SQLAllTableConstraints sqlAllTableConstraints = new 
SQLAllTableConstraints();
-    sqlAllTableConstraints.setPrimaryKeys(getPrimaryKeys(catName, dbName, 
tblName));
-    sqlAllTableConstraints.setForeignKeys(getForeignKeys(catName, null, null, 
dbName, tblName));
-    sqlAllTableConstraints.setUniqueConstraints(getUniqueConstraints(catName, 
dbName, tblName));
-    
sqlAllTableConstraints.setDefaultConstraints(getDefaultConstraints(catName, 
dbName, tblName));
-    sqlAllTableConstraints.setCheckConstraints(getCheckConstraints(catName, 
dbName, tblName));
-    
sqlAllTableConstraints.setNotNullConstraints(getNotNullConstraints(catName, 
dbName, tblName));
+    PrimaryKeysRequest primaryKeysRequest = new PrimaryKeysRequest(dbName, 
tblName);
+    primaryKeysRequest.setCatName(catName);
+    sqlAllTableConstraints.setPrimaryKeys(getPrimaryKeys(primaryKeysRequest));
+    ForeignKeysRequest foreignKeysRequest =
+        new ForeignKeysRequest(null, null, dbName, tblName);
+    foreignKeysRequest.setCatName(catName);
+    sqlAllTableConstraints.setForeignKeys(getForeignKeys(foreignKeysRequest));
+    sqlAllTableConstraints.
+        setUniqueConstraints(getUniqueConstraints(new 
UniqueConstraintsRequest(catName, dbName, tblName)));
+    sqlAllTableConstraints.
+        setDefaultConstraints(getDefaultConstraints(new 
DefaultConstraintsRequest(catName, dbName, tblName)));
+    sqlAllTableConstraints.
+        setCheckConstraints(getCheckConstraints(new 
CheckConstraintsRequest(catName, dbName, tblName)));
+    sqlAllTableConstraints.
+        setNotNullConstraints(getNotNullConstraints(new 
NotNullConstraintsRequest(catName, dbName, tblName)));
     return sqlAllTableConstraints;
   }
 
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
index d0e73d75ec9..f4db86722f0 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/RawStore.java
@@ -23,7 +23,6 @@
 import java.lang.annotation.RetentionPolicy;
 import java.lang.annotation.Target;
 import java.nio.ByteBuffer;
-import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
 
@@ -116,7 +115,6 @@
 import org.apache.hadoop.hive.metastore.api.WriteEventInfo;
 import org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs;
 import org.apache.hadoop.hive.metastore.model.MTable;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.properties.PropertyStore;
 import 
org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils.ColStatsObjWithSourceInfo;
 import org.apache.thrift.TException;
@@ -398,23 +396,6 @@ boolean addPartition(Partition part)
   boolean addPartitions(String catName, String dbName, String tblName, 
List<Partition> parts)
       throws InvalidObjectException, MetaException;
 
-  /**
-   * @deprecated use {@link #addPartitions(String, String, String, List)} 
instead.
-   * @param catName catalog name.
-   * @param dbName database name.
-   * @param tblName table name.
-   * @param partitionSpec specification for the partition
-   * @param ifNotExists whether it is in an error if the partition already 
exists.  If true, then
-   *                   it is not an error if the partition exists, if false, 
it is.
-   * @return whether the partition was created.
-   * @throws InvalidObjectException The passed in partition spec or table 
specification is invalid.
-   * @throws MetaException error writing to RDBMS.
-   */
-  @Deprecated
-  boolean addPartitions(String catName, String dbName, String tblName,
-                        PartitionSpecProxy partitionSpec, boolean ifNotExists)
-      throws InvalidObjectException, MetaException;
-
   /**
    * Get a partition.
    * @param catName catalog name.
@@ -458,23 +439,6 @@ boolean doesPartitionExist(String catName, String dbName, 
String tableName,
       List<FieldSchema> partKeys, List<String> part_vals)
       throws MetaException, NoSuchObjectException;
 
-  /**
-   * Drop a partition.
-   * @param catName catalog name.
-   * @param dbName database name.
-   * @param tableName table name.
-   * @param part_vals list of partition values.
-   * @return true if the partition was dropped.
-   * @throws MetaException Error accessing the RDBMS.
-   * @throws NoSuchObjectException no partition matching this description 
exists
-   * @throws InvalidObjectException error dropping the statistics for the 
partition
-   * @throws InvalidInputException error dropping the statistics for the 
partition
-   */
-  @Deprecated
-  boolean dropPartition(String catName, String dbName, String tableName,
-      List<String> part_vals) throws MetaException, NoSuchObjectException, 
InvalidObjectException,
-      InvalidInputException;
-
   /**
    * Drop a partition.
    * @param catName catalog name.
@@ -490,23 +454,6 @@ boolean dropPartition(String catName, String dbName, 
String tableName,
   boolean dropPartition(String catName, String dbName, String tableName, 
String partName)
       throws MetaException, NoSuchObjectException, InvalidObjectException, 
InvalidInputException;
 
-  /**
-   * Get some or all partitions for a table.
-   * @param catName catalog name.
-   * @param dbName database name.
-   * @param tableName table name
-   * @param max maximum number of partitions, or -1 to get all partitions.
-   * @return list of partitions
-   * @throws MetaException error access the RDBMS.
-   * @throws NoSuchObjectException no such table exists
-   */
-  @Deprecated
-  default List<Partition> getPartitions(String catName, String dbName,
-      String tableName, int max) throws MetaException, NoSuchObjectException {
-    return getPartitions(catName, dbName, tableName, new GetPartitionsArgs
-        .GetPartitionsArgsBuilder().max(max).build());
-  }
-
   /**
    * Get some or all partitions for a table.
    * @param catName catalog name.
@@ -767,27 +714,6 @@ List<Partition> alterPartitions(String catName, String db_name, String tbl_name,
       String queryValidWriteIds)
       throws InvalidObjectException, MetaException;
 
-  /**
-   * Get partitions with a filter.  This is a portion of the SQL where clause.
-   * @param catName catalog name
-   * @param dbName database name
-   * @param tblName table name
-   * @param filter SQL where clause filter
-   * @param maxParts maximum number of partitions to return, or -1 for all.
-   * @return list of partition objects matching the criteria
-   * @throws MetaException Error accessing the RDBMS or processing the filter.
-   * @throws NoSuchObjectException no such table.
-   */
-  @Deprecated
-  default List<Partition> getPartitionsByFilter(
-     String catName, String dbName, String tblName, String filter, short maxParts)
-     throws MetaException, NoSuchObjectException {
-    return getPartitionsByFilter(catName, dbName, tblName, new GetPartitionsArgs
-        .GetPartitionsArgsBuilder()
-        .filter(filter).max(maxParts)
-        .build());
-  }
-
   /**
    * Get partitions with a filter.  This is a portion of the SQL where clause.
    * @param catName catalog name
@@ -833,27 +759,6 @@ List<Partition> getPartitionsByFilter(
   List<Partition> getPartitionSpecsByFilterAndProjection(Table table,
                                                          GetProjectionsSpec projectionSpec, GetPartitionsFilterSpec filterSpec)
       throws MetaException, NoSuchObjectException;
-  /**
-   * Get partitions using an already parsed expression.
-   * @param catName catalog name.
-   * @param dbName database name
-   * @param tblName table name
-   * @param expr an already parsed Hive expression
-   * @param defaultPartitionName default name of a partition
-   * @param maxParts maximum number of partitions to return, or -1 for all
-   * @param result list to place resulting partitions in
-   * @return true if the result contains unknown partitions.
-   * @throws TException error executing the expression
-   */
-  @Deprecated
-  default boolean getPartitionsByExpr(String catName, String dbName, String tblName,
-       byte[] expr, String defaultPartitionName, short maxParts, List<Partition> result)
-       throws TException {
-    return getPartitionsByExpr(catName, dbName, tblName, result, new GetPartitionsArgs
-        .GetPartitionsArgsBuilder()
-        .expr(expr).defaultPartName(defaultPartitionName).max(maxParts)
-        .build());
-  }
 
   /**
    * Get partitions using an already parsed expression.
@@ -881,20 +786,6 @@ boolean getPartitionsByExpr(String catName, String dbName, String tblName,
   int getNumPartitionsByFilter(String catName, String dbName, String tblName, String filter)
     throws MetaException, NoSuchObjectException;
 
-  /**
-   * Get the number of partitions that match an already parsed expression.
-   * @param catName catalog name.
-   * @param dbName database name.
-   * @param tblName table name.
-   * @param expr an already parsed Hive expression
-   * @return number of matching partitions.
-   * @throws MetaException error accessing the RDBMS or working with the expression.
-   * @throws NoSuchObjectException no such table.
-   */
-  @Deprecated
-  int getNumPartitionsByExpr(String catName, String dbName, String tblName, byte[] expr)
-      throws MetaException, NoSuchObjectException;
-
   /**
    * Get the number of partitions that match a given partial specification.
    * @param catName catalog name.
@@ -1137,7 +1028,6 @@ List<RolePrincipalGrant> listRolesWithGrants(String 
principalName,
    */
   List<RolePrincipalGrant> listRoleMembers(String roleName);
 
-
   /**
    * Fetch a partition along with privilege information for a particular user.
    * @param catName catalog name.
@@ -1155,30 +1045,6 @@ Partition getPartitionWithAuth(String catName, String 
dbName, String tblName,
       List<String> partVals, String user_name, List<String> group_names)
       throws MetaException, NoSuchObjectException, InvalidObjectException;
 
-  /**
-   * Fetch some or all partitions for a table, along with privilege 
information for a particular
-   * user.
-   * @param catName catalog name.
-   * @param dbName database name.
-   * @param tblName table name.
-   * @param maxParts maximum number of partitions to fetch, -1 for all 
partitions.
-   * @param userName user to get privilege information for.
-   * @param groupNames groups to get privilege information for.
-   * @return list of partitions.
-   * @throws MetaException error access the RDBMS.
-   * @throws NoSuchObjectException no such table exists
-   * @throws InvalidObjectException error fetching privilege information.
-   */
-  @Deprecated
-  default List<Partition> getPartitionsWithAuth(String catName, String dbName,
-       String tblName, short maxParts, String userName, List<String> 
groupNames)
-       throws MetaException, NoSuchObjectException, InvalidObjectException {
-    return listPartitionsPsWithAuth(catName, dbName, tblName,
-        new GetPartitionsArgs.GetPartitionsArgsBuilder()
-            .max(maxParts).userName(userName).groupNames(groupNames)
-            .build());
-  }
-
   /**
    * Lists partition names that match a given partial specification
    * @param catName catalog name.
@@ -1199,38 +1065,6 @@ List<String> listPartitionNamesPs(String catName, String 
db_name, String tbl_nam
       List<String> part_vals, short max_parts)
       throws MetaException, NoSuchObjectException;
 
-  /**
-   * Lists partitions that match a given partial specification and sets their 
auth privileges.
-   *   If userName and groupNames null, then no auth privileges are set.
-   * @param catName catalog name.
-   * @param db_name
-   *          The name of the database which has the partitions
-   * @param tbl_name
-   *          The name of the table which has the partitions
-   * @param part_vals
-   *          A partial list of values for partitions in order of the table's 
partition keys
-   *          Entries can be empty if you need to specify latter partitions.
-   * @param max_parts
-   *          The maximum number of partitions to return
-   * @param userName
-   *          The user name for the partition for authentication privileges
-   * @param groupNames
-   *          The groupNames for the partition for authentication privileges
-   * @return A list of partitions that match the partial spec.
-   * @throws MetaException error access RDBMS
-   * @throws NoSuchObjectException No such table exists
-   * @throws InvalidObjectException error access privilege information
-   */
-  @Deprecated
-  default List<Partition> listPartitionsPsWithAuth(String catName, String 
db_name, String tbl_name,
-      List<String> part_vals, short max_parts, String userName, List<String> 
groupNames)
-      throws MetaException, InvalidObjectException, NoSuchObjectException {
-    return listPartitionsPsWithAuth(catName, db_name, tbl_name, new 
GetPartitionsArgs
-        .GetPartitionsArgsBuilder()
-        
.part_vals(part_vals).max(max_parts).userName(userName).groupNames(groupNames)
-        .build());
-  }
-
   /**
    * Lists partitions that match a given partial specification and sets their 
auth privileges.
    *   If userName and groupNames null, then no auth privileges are set.
@@ -1377,30 +1211,6 @@ List<ColumnStatistics> getPartitionColumnStatistics(
       String engine, String writeIdList)
       throws MetaException, NoSuchObjectException;
 
-  /**
-   * Deletes column statistics if present associated with a given db, table, 
partition and col. If
-   * null is passed instead of a colName, stats when present for all columns 
associated
-   * with a given db, table and partition are deleted.
-   * @param catName catalog name.
-   * @param dbName database name.
-   * @param tableName table name.
-   * @param partName partition name.
-   * @param partVals partition values.
-   * @param colName column name.
-   * @param engine engine for which we want to delete statistics
-   * @return Boolean indicating the outcome of the operation
-   * @throws NoSuchObjectException no such partition
-   * @throws MetaException error access the RDBMS
-   * @throws InvalidObjectException error dropping the stats
-   * @throws InvalidInputException bad input, such as null table or database 
name.
-   */
-  default boolean deletePartitionColumnStatistics(String catName, String 
dbName, String tableName,
-                                                  String partName, 
List<String> partVals, String colName, String engine)
-          throws NoSuchObjectException, MetaException, InvalidObjectException, 
InvalidInputException{
-    return deletePartitionColumnStatistics(catName, dbName, tableName,
-            Arrays.asList(partName), colName != null ? Arrays.asList(colName) 
: null, engine);
-  }
-
   /**
    * Deletes column statistics if present associated with a given db, table, 
partition and a list of cols. If
    * null is passed instead of a colName, stats when present for all columns 
associated
@@ -1421,25 +1231,6 @@ boolean deletePartitionColumnStatistics(String catName, 
String dbName, String ta
     List<String> partNames, List<String> colNames, String engine)
     throws NoSuchObjectException, MetaException, InvalidObjectException, 
InvalidInputException;
 
-  /**
-   * Delete statistics for a single column or all columns in a table.
-   * @param catName catalog name
-   * @param dbName database name
-   * @param tableName table name
-   * @param colName column name.  Null to delete stats for all columns in the 
table.
-   * @param engine engine for which we want to delete statistics
-   * @return true if the statistics were deleted.
-   * @throws NoSuchObjectException no such table or column.
-   * @throws MetaException error access the RDBMS.
-   * @throws InvalidObjectException error dropping the stats
-   * @throws InvalidInputException bad inputs, such as null table name.
-   */
-  default boolean deleteTableColumnStatistics(String catName, String dbName, 
String tableName,
-    String colName, String engine)
-    throws NoSuchObjectException, MetaException, InvalidObjectException, 
InvalidInputException {
-    return deleteTableColumnStatistics(catName, dbName, tableName, colName != 
null ? Arrays.asList(colName) : null, engine);
-  }
-
   /**
    * Delete statistics for a single column, a list of columns or all columns 
in a table.
    * @param catName catalog name
@@ -1656,16 +1447,6 @@ void dropFunction(String catName, String dbName, String 
funcName)
    */
   List<Function> getAllFunctions(String catName) throws MetaException;
 
-  /**
-   * Retrieve list of function names based on name pattern.
-   * @param dbName database name
-   * @param pattern pattern to match
-   * @return functions that match the pattern
-   * @throws MetaException incorrectly specified function
-   */
-  @Deprecated
-  List<String> getFunctions(String catName, String dbName, String pattern) throws MetaException;
-
   /**
    * Retrieve list of function names based on name pattern.
    * @param dbName database name
@@ -1763,27 +1544,35 @@ List<ColStatsObjWithSourceInfo> 
getPartitionColStatsForDatabase(String catName,
    * Flush any catalog objects held by the metastore implementation.  Note 
that this does not
    * flush statistics objects.  This should be called at the beginning of each 
query.
    */
-  void flushCache();
+  default void flushCache() {
+    // NOP as there's no caching
+  }
 
   /**
    * @param fileIds List of file IDs from the filesystem.
    * @return File metadata buffers from file metadata cache. The array is 
fileIds-sized, and
    *         the entries (or nulls, if metadata is not in cache) correspond to 
fileIds in the list
    */
-  ByteBuffer[] getFileMetadata(List<Long> fileIds) throws MetaException;
+  default ByteBuffer[] getFileMetadata(List<Long> fileIds) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
 
   /**
    * @param fileIds List of file IDs from the filesystem.
    * @param metadata Metadata buffers corresponding to fileIds in the list.
    * @param type The type; determines the class that can do additional 
processing for metadata.
    */
-  void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata,
-      FileMetadataExprType type) throws MetaException;
+  default void putFileMetadata(List<Long> fileIds, List<ByteBuffer> metadata,
+      FileMetadataExprType type) throws MetaException {
+    throw new UnsupportedOperationException();
+  }
 
   /**
    * @return Whether file metadata cache is supported by this implementation.
    */
-  boolean isFileMetadataSupported();
+  default boolean isFileMetadataSupported() {
+    return false;
+  }
 
   /**
    * Gets file metadata from cache after applying a format-specific expression 
that can
@@ -1798,12 +1587,16 @@ void putFileMetadata(List<Long> fileIds, 
List<ByteBuffer> metadata,
    * @param eliminated Output parameter; fileIds-sized array to receive the 
indication of whether
    *                   the corresponding files are entirely eliminated by the 
expression.
    */
-  void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
+  default void getFileMetadataByExpr(List<Long> fileIds, FileMetadataExprType type, byte[] expr,
       ByteBuffer[] metadatas, ByteBuffer[] exprResults, boolean[] eliminated)
-          throws MetaException;
+          throws MetaException {
+    throw new UnsupportedOperationException();
+  }
 
   /** Gets file metadata handler for the corresponding type. */
-  FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type);
+  default FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
+    throw new UnsupportedOperationException();
+  }
 
   /**
    * Gets total number of tables.
@@ -1823,19 +1616,6 @@ void getFileMetadataByExpr(List<Long> fileIds, 
FileMetadataExprType type, byte[]
   @InterfaceStability.Evolving
   int getDatabaseCount() throws MetaException;
 
-  /**
-   * Get the primary associated with a table.  Strangely enough each 
SQLPrimaryKey is actually a
-   * column in they key, not the key itself.  Thus the list.
-   * @param catName catalog name
-   * @param db_name database name
-   * @param tbl_name table name
-   * @return list of primary key columns or an empty list if the table does 
not have a primary key
-   * @throws MetaException error accessing the RDBMS
-   */
-  @Deprecated
-  List<SQLPrimaryKey> getPrimaryKeys(String catName, String db_name, String 
tbl_name)
-      throws MetaException;
-
   /**
    * SQLPrimaryKey represents a single primary key column.
    * Since a table can have one or more primary keys ( in case of composite 
primary key ),
@@ -1847,24 +1627,6 @@ List<SQLPrimaryKey> getPrimaryKeys(String catName, 
String db_name, String tbl_na
   List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest request)
       throws MetaException;
 
-  /**
-   * Get the foreign keys for a table.  All foreign keys for a particular 
table can be fetched by
-   * passing null for the last two arguments.
-   * @param catName catalog name.
-   * @param parent_db_name Database the table referred to is in.  This can be 
null to match all
-   *                       databases.
-   * @param parent_tbl_name Table that is referred to.  This can be null to 
match all tables.
-   * @param foreign_db_name Database the table with the foreign key is in.
-   * @param foreign_tbl_name Table with the foreign key.
-   * @return List of all matching foreign key columns.  Note that if more than 
one foreign key
-   * matches the arguments the results here will be all mixed together into a 
single list.
-   * @throws MetaException error access the RDBMS.
-   */
-  @Deprecated
-  List<SQLForeignKey> getForeignKeys(String catName, String parent_db_name,
-    String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
-    throws MetaException;
-
   /**
    * SQLForeignKey represents a single foreign key column.
    * Since a table can have one or more foreign keys ( in case of composite 
foreign key ),
@@ -1877,18 +1639,6 @@ List<SQLForeignKey> getForeignKeys(String catName, 
String parent_db_name,
   List<SQLForeignKey> getForeignKeys(ForeignKeysRequest request)
       throws MetaException;
 
-  /**
-   * Get unique constraints associated with a table.
-   * @param catName catalog name.
-   * @param db_name database name.
-   * @param tbl_name table name.
-   * @return list of unique constraints
-   * @throws MetaException error access the RDBMS.
-   */
-  @Deprecated
-  List<SQLUniqueConstraint> getUniqueConstraints(String catName, String 
db_name,
-    String tbl_name) throws MetaException;
-
   /**
    * SQLUniqueConstraint represents a single unique constraint column.
    * Since a table can have one or more unique constraint ( in case of 
composite unique constraint ),
@@ -1899,18 +1649,6 @@ List<SQLUniqueConstraint> getUniqueConstraints(String 
catName, String db_name,
    */
   List<SQLUniqueConstraint> getUniqueConstraints(UniqueConstraintsRequest 
request) throws MetaException;
 
-  /**
-   * Get not null constraints on a table.
-   * @param catName catalog name.
-   * @param db_name database name.
-   * @param tbl_name table name.
-   * @return list of not null constraints
-   * @throws MetaException error accessing the RDBMS.
-   */
-  @Deprecated
-  List<SQLNotNullConstraint> getNotNullConstraints(String catName, String 
db_name,
-    String tbl_name) throws MetaException;
-
   /**
    * SQLNotNullConstraint represents a single not null constraint column.
    * Since a table can have one or more not null constraint ( in case of 
composite not null constraint ),
@@ -1921,18 +1659,6 @@ List<SQLNotNullConstraint> getNotNullConstraints(String 
catName, String db_name,
    */
   List<SQLNotNullConstraint> getNotNullConstraints(NotNullConstraintsRequest 
request) throws MetaException;
 
-  /**
-   * Get default values for columns in a table.
-   * @param catName catalog name
-   * @param db_name database name
-   * @param tbl_name table name
-   * @return list of default values defined on the table.
-   * @throws MetaException error accessing the RDBMS
-   */
-  @Deprecated
-  List<SQLDefaultConstraint> getDefaultConstraints(String catName, String 
db_name,
-                                                   String tbl_name) throws 
MetaException;
-
   /**
    * SQLDefaultConstraint represents a single default constraint column.
    * Since a table can have one or more default constraint ( in case of 
composite default constraint ),
@@ -1943,18 +1669,6 @@ List<SQLDefaultConstraint> getDefaultConstraints(String 
catName, String db_name,
    */
   List<SQLDefaultConstraint> getDefaultConstraints(DefaultConstraintsRequest 
request) throws MetaException;
 
-  /**
-   * Get check constraints for columns in a table.
-   * @param catName catalog name.
-   * @param db_name database name
-   * @param tbl_name table name
-   * @return ccheck constraints for this table
-   * @throws MetaException error accessing the RDBMS
-   */
-  @Deprecated
-  List<SQLCheckConstraint> getCheckConstraints(String catName, String db_name,
-                                                   String tbl_name) throws 
MetaException;
-
   /**
    * SQLCheckConstraint represents a single check constraint column.
    * Since a table can have one or more check constraint ( in case of 
composite check constraint ),
@@ -1965,18 +1679,6 @@ List<SQLCheckConstraint> getCheckConstraints(String 
catName, String db_name,
    */
   List<SQLCheckConstraint> getCheckConstraints(CheckConstraintsRequest 
request) throws MetaException;
 
-  /**
-   * Get all constraints of the table
-   * @param catName catalog name
-   * @param dbName database name
-   * @param tblName table name
-   * @return all constraints for this table
-   * @throws MetaException error accessing the RDBMS
-   */
-  @Deprecated
-  SQLAllTableConstraints getAllTableConstraints(String catName, String dbName, 
String tblName)
-      throws MetaException, NoSuchObjectException;
-
   /**
    * Get table constraints
    * @param request AllTableConstraintsRequest object
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
index 45655d83fd4..786cdd615fc 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/TransactionalValidationListener.java
@@ -38,6 +38,7 @@
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
 import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent;
 import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent;
@@ -476,8 +477,8 @@ private List<Partition> getTablePartitions(IHMSHandler 
hmsHandler, Table table)
     try {
       RawStore rawStore = hmsHandler.getMS();
       String catName = getTableCatalog(table);
-      List<Partition> partitions = rawStore.getPartitions(catName, table.getDbName(), table.getTableName(), -1);
-      return partitions;
+      return rawStore.getPartitions(catName, table.getDbName(), table.getTableName(),
+          GetPartitionsArgs.getAllPartitions());
     } catch (Exception err) {
       String msg = "Error getting partitions for " + 
Warehouse.getQualifiedName(table);
       LOG.error(msg, err);
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
index fcd6779054e..e1ad410417b 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/cache/CachedStore.java
@@ -17,19 +17,16 @@
  */
 package org.apache.hadoop.hive.metastore.cache;
 
-import java.nio.ByteBuffer;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
 import java.util.EmptyStackException;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Objects;
 import java.util.Optional;
-import java.util.Set;
 import java.util.Stack;
 import java.util.concurrent.Executors;
 import java.util.concurrent.ScheduledExecutorService;
@@ -49,7 +46,6 @@
 import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.common.TableName;
 import org.apache.hadoop.hive.metastore.Deadline;
-import org.apache.hadoop.hive.metastore.FileMetadataHandler;
 import org.apache.hadoop.hive.metastore.ObjectStore;
 import org.apache.hadoop.hive.metastore.PartFilterExprUtil;
 import org.apache.hadoop.hive.metastore.PartitionExpressionProxy;
@@ -68,7 +64,6 @@
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
 import org.apache.hadoop.hive.metastore.messaging.*;
 import org.apache.hadoop.hive.metastore.model.MTable;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.txn.TxnUtils;
 import org.apache.hadoop.hive.metastore.utils.FileUtils;
 import org.apache.hadoop.hive.metastore.utils.JavaUtils;
@@ -534,7 +529,7 @@ static void prewarm(RawStore rawStore) {
               TableCacheObjects cacheObjects = new TableCacheObjects();
               if (!table.getPartitionKeys().isEmpty()) {
                 Deadline.startTimer("getPartitions");
-                partitions = rawStore.getPartitions(catName, dbName, tblName, -1);
+                partitions = rawStore.getPartitions(catName, dbName, tblName, GetPartitionsArgs.getAllPartitions());
                 Deadline.stopTimer();
                 cacheObjects.setPartitions(partitions);
                 List<String> partNames = new ArrayList<>(partitions.size());
@@ -938,7 +933,8 @@ private void updateTablePartitions(RawStore rawStore, 
String catName, String dbN
           dbName, tblName);
       try {
         Deadline.startTimer("getPartitions");
-        List<Partition> partitions = rawStore.getPartitions(catName, dbName, tblName, -1);
+        List<Partition> partitions =
+            rawStore.getPartitions(catName, dbName, tblName, GetPartitionsArgs.getAllPartitions());
         Deadline.stopTimer();
         sharedCache
             
.refreshPartitionsInCache(StringUtils.normalizeIdentifier(catName), 
StringUtils.normalizeIdentifier(dbName),
@@ -1381,26 +1377,6 @@ public Table getTable(String catName, String dbName, 
String tblName, String vali
     return succ;
   }
 
-  @Override public boolean addPartitions(String catName, String dbName, String 
tblName,
-      PartitionSpecProxy partitionSpec, boolean ifNotExists) throws 
InvalidObjectException, MetaException {
-    boolean succ = rawStore.addPartitions(catName, dbName, tblName, 
partitionSpec, ifNotExists);
-    // in case of event based cache update, cache will be updated during 
commit.
-    if (succ && !canUseEvents) {
-      catName = normalizeIdentifier(catName);
-      dbName = normalizeIdentifier(dbName);
-      tblName = normalizeIdentifier(tblName);
-      if (!shouldCacheTable(catName, dbName, tblName)) {
-        return succ;
-      }
-      PartitionSpecProxy.PartitionIterator iterator = 
partitionSpec.getPartitionIterator();
-      while (iterator.hasNext()) {
-        Partition part = iterator.next();
-        sharedCache.addPartitionToCache(catName, dbName, tblName, part);
-      }
-    }
-    return succ;
-  }
-
   @Override public Partition getPartition(String catName, String dbName, 
String tblName, List<String> partVals)
       throws MetaException, NoSuchObjectException {
     return getPartition(catName, dbName, tblName, partVals, null);
@@ -1448,22 +1424,6 @@ public Table getTable(String catName, String dbName, 
String tblName, String vali
     return sharedCache.existPartitionFromCache(catName, dbName, tblName, 
partVals);
   }
 
-  @Override public boolean dropPartition(String catName, String dbName, String 
tblName, List<String> partVals)
-      throws MetaException, NoSuchObjectException, InvalidObjectException, 
InvalidInputException {
-    boolean succ = rawStore.dropPartition(catName, dbName, tblName, partVals);
-    // in case of event based cache update, cache will be updated during 
commit.
-    if (succ && !canUseEvents) {
-      catName = normalizeIdentifier(catName);
-      dbName = normalizeIdentifier(dbName);
-      tblName = normalizeIdentifier(tblName);
-      if (!shouldCacheTable(catName, dbName, tblName)) {
-        return succ;
-      }
-      sharedCache.removePartitionFromCache(catName, dbName, tblName, partVals);
-    }
-    return succ;
-  }
-
   @Override public boolean dropPartition(String catName, String dbName, String 
tblName, String partName)
       throws MetaException, NoSuchObjectException, InvalidObjectException, 
InvalidInputException {
     boolean succ = rawStore.dropPartition(catName, dbName, tblName, partName);
@@ -1773,25 +1733,6 @@ public List<Partition> getPartitionsByFilter(String 
catName, String dbName, Stri
     return rawStore.getNumPartitionsByFilter(catName, dbName, tblName, filter);
   }
 
-  @Override public int getNumPartitionsByExpr(String catName, String dbName, 
String tblName, byte[] expr)
-      throws MetaException, NoSuchObjectException {
-    catName = normalizeIdentifier(catName);
-    dbName = StringUtils.normalizeIdentifier(dbName);
-    tblName = StringUtils.normalizeIdentifier(tblName);
-    if (!shouldCacheTable(catName, dbName, tblName) || (canUseEvents && 
rawStore.isActiveTransaction())) {
-      return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
-    }
-    String defaultPartName = MetastoreConf.getVar(getConf(), 
ConfVars.DEFAULTPARTITIONNAME);
-    List<String> partNames = new LinkedList<>();
-    Table table = sharedCache.getTableFromCache(catName, dbName, tblName);
-    if (table == null) {
-      // The table is not yet loaded in cache
-      return rawStore.getNumPartitionsByExpr(catName, dbName, tblName, expr);
-    }
-    getPartitionNamesPrunedByExprNoTxn(table, expr, defaultPartName, 
Short.MAX_VALUE, partNames, sharedCache);
-    return partNames.size();
-  }
-
   @VisibleForTesting public static List<String> partNameToVals(String name) {
     if (name == null) {
       return null;
@@ -2201,25 +2142,6 @@ private static void 
updateTableColumnsStatsInternal(Configuration conf, ColumnSt
     return columnStatistics;
   }
 
-  @Override public boolean deleteTableColumnStatistics(String catName, String 
dbName, String tblName, String colName, String engine)
-      throws NoSuchObjectException, MetaException, InvalidObjectException, 
InvalidInputException {
-    if (!CacheUtils.HIVE_ENGINE.equals(engine)) {
-      throw new RuntimeException("CachedStore can only be enabled for Hive 
engine");
-    }
-    boolean succ = rawStore.deleteTableColumnStatistics(catName, dbName, 
tblName, colName, engine);
-    // in case of event based cache update, cache is updated during commit txn
-    if (succ && !canUseEvents) {
-      catName = normalizeIdentifier(catName);
-      dbName = normalizeIdentifier(dbName);
-      tblName = normalizeIdentifier(tblName);
-      if (!shouldCacheTable(catName, dbName, tblName)) {
-        return succ;
-      }
-      sharedCache.removeTableColStatsFromCache(catName, dbName, tblName, 
colName);
-    }
-    return succ;
-  }
-
   @Override public boolean deleteTableColumnStatistics(String catName, String 
dbName, String tblName, List<String> colNames, String engine)
           throws NoSuchObjectException, MetaException, InvalidObjectException, 
InvalidInputException {
     if (!CacheUtils.HIVE_ENGINE.equals(engine)) {
@@ -2345,26 +2267,6 @@ private Map<String, String> 
updatePartitionColumnStatisticsInternal(Table table,
     return columnStatistics;
   }
 
-  @Override public boolean deletePartitionColumnStatistics(String catName, 
String dbName, String tblName,
-      String partName, List<String> partVals, String colName, String engine)
-      throws NoSuchObjectException, MetaException, InvalidObjectException, 
InvalidInputException {
-    if (!CacheUtils.HIVE_ENGINE.equals(engine)) {
-      throw new RuntimeException("CachedStore can only be enabled for Hive 
engine");
-    }
-    boolean succ = rawStore.deletePartitionColumnStatistics(catName, dbName, 
tblName, partName, partVals, colName, engine);
-    // in case of event based cache update, cache is updated during commit txn.
-    if (succ && !canUseEvents) {
-      catName = normalizeIdentifier(catName);
-      dbName = normalizeIdentifier(dbName);
-      tblName = normalizeIdentifier(tblName);
-      if (!shouldCacheTable(catName, dbName, tblName)) {
-        return succ;
-      }
-      sharedCache.removePartitionColStatsFromCache(catName, dbName, tblName, 
partVals, colName);
-    }
-    return succ;
-  }
-
   @Override public boolean deletePartitionColumnStatistics(String catName, 
String dbName, String tblName,
                                                            List<String> 
partNames, List<String> colNames, String engine)
           throws NoSuchObjectException, MetaException, InvalidObjectException, 
InvalidInputException {
@@ -2688,11 +2590,6 @@ long getPartsFound() {
     return rawStore.getAllFunctions(catName);
   }
 
-  @Override public List<String> getFunctions(String catName, String dbName, 
String pattern) throws MetaException {
-    // TODO functionCache
-    return rawStore.getFunctions(catName, dbName, pattern);
-  }
-
   @Override public <T> List<T> getFunctionsRequest(String catName, String 
dbName,
       String pattern, boolean isReturnNames) throws MetaException {
     return rawStore.getFunctionsRequest(catName, dbName, pattern, 
isReturnNames);
@@ -2718,32 +2615,6 @@ long getPartsFound() {
     return rawStore.getNotificationEventsCount(rqst);
   }
 
-  @Override public void flushCache() {
-    rawStore.flushCache();
-  }
-
-  @Override public ByteBuffer[] getFileMetadata(List<Long> fileIds) throws 
MetaException {
-    return rawStore.getFileMetadata(fileIds);
-  }
-
-  @Override public void putFileMetadata(List<Long> fileIds, List<ByteBuffer> 
metadata, FileMetadataExprType type)
-      throws MetaException {
-    rawStore.putFileMetadata(fileIds, metadata, type);
-  }
-
-  @Override public boolean isFileMetadataSupported() {
-    return rawStore.isFileMetadataSupported();
-  }
-
-  @Override public void getFileMetadataByExpr(List<Long> fileIds, 
FileMetadataExprType type, byte[] expr,
-      ByteBuffer[] metadatas, ByteBuffer[] exprResults, boolean[] eliminated) 
throws MetaException {
-    rawStore.getFileMetadataByExpr(fileIds, type, expr, metadatas, 
exprResults, eliminated);
-  }
-
-  @Override public FileMetadataHandler 
getFileMetadataHandler(FileMetadataExprType type) {
-    return rawStore.getFileMetadataHandler(type);
-  }
-
   @Override public int getTableCount() throws MetaException {
     return rawStore.getTableCount();
   }
@@ -2756,14 +2627,6 @@ long getPartsFound() {
     return rawStore.getDatabaseCount();
   }
 
-  @Override
-  @Deprecated
-  public List<SQLPrimaryKey> getPrimaryKeys(String catName, String dbName, 
String tblName) throws MetaException {
-    PrimaryKeysRequest request = new PrimaryKeysRequest(dbName, tblName);
-    request.setCatName(catName);
-    return getPrimaryKeys(request);
-  }
-
   @Override
   public List<SQLPrimaryKey> getPrimaryKeys(PrimaryKeysRequest request) throws 
MetaException {
     String catName = StringUtils.normalizeIdentifier(request.getCatName());
@@ -2775,15 +2638,6 @@ public List<SQLPrimaryKey> 
getPrimaryKeys(PrimaryKeysRequest request) throws Met
     return sharedCache.listCachedPrimaryKeys(catName, dbName, tblName);
   }
 
-  @Override
-  @Deprecated
-  public List<SQLForeignKey> getForeignKeys(String catName, String 
parentDbName, String parentTblName,
-      String foreignDbName, String foreignTblName) throws MetaException {
-    ForeignKeysRequest request = new ForeignKeysRequest(parentDbName, 
parentTblName, foreignDbName, foreignTblName);
-    request.setCatName(catName);
-    return getForeignKeys(request);
-  }
-
   @Override
   public List<SQLForeignKey> getForeignKeys(ForeignKeysRequest request) throws 
MetaException {
     // Get correct ForeignDBName and TableName
@@ -2806,14 +2660,6 @@ public List<SQLForeignKey> 
getForeignKeys(ForeignKeysRequest request) throws Met
     return sharedCache.listCachedForeignKeys(catName, foreignDbName, 
foreignTblName, parentDbName, parentTblName);
   }
 
-  @Override
-  @Deprecated
-  public List<SQLUniqueConstraint> getUniqueConstraints(String catName, String 
dbName, String tblName)
-      throws MetaException {
-    UniqueConstraintsRequest request = new UniqueConstraintsRequest(catName, 
dbName, tblName);
-    return getUniqueConstraints(request);
-  }
-
   @Override
   public List<SQLUniqueConstraint> 
getUniqueConstraints(UniqueConstraintsRequest request) throws MetaException {
     String catName = StringUtils.normalizeIdentifier(request.getCatName());
@@ -2825,14 +2671,6 @@ public List<SQLUniqueConstraint> 
getUniqueConstraints(UniqueConstraintsRequest r
     return sharedCache.listCachedUniqueConstraint(catName, dbName, tblName);
   }
 
-  @Override
-  @Deprecated
-  public List<SQLNotNullConstraint> getNotNullConstraints(String catName, 
String dbName, String tblName)
-      throws MetaException {
-    NotNullConstraintsRequest request = new NotNullConstraintsRequest(catName, 
dbName, tblName);
-    return getNotNullConstraints(request);
-  }
-
   @Override
   public List<SQLNotNullConstraint> 
getNotNullConstraints(NotNullConstraintsRequest request) throws MetaException {
     String catName = normalizeIdentifier(request.getCatName());
@@ -2844,22 +2682,6 @@ public List<SQLNotNullConstraint> 
getNotNullConstraints(NotNullConstraintsReques
     return sharedCache.listCachedNotNullConstraints(catName, dbName, tblName);
   }
 
-  /**
-   * Get default Constraints from cache Store if not present then fetch from 
raw store
-   * @param catName catalog name
-   * @param dbName database name
-   * @param tblName table name
-   * @return list of default constraints for given table
-   * @throws MetaException
-   */
-  @Override
-  @Deprecated
-  public List<SQLDefaultConstraint> getDefaultConstraints(String catName, 
String dbName, String tblName)
-      throws MetaException {
-    DefaultConstraintsRequest request = new DefaultConstraintsRequest(catName, 
dbName, tblName);
-    return getDefaultConstraints(request);
-  }
-
   @Override
   public List<SQLDefaultConstraint> 
getDefaultConstraints(DefaultConstraintsRequest request) throws MetaException {
     String catName = StringUtils.normalizeIdentifier(request.getCatName());
@@ -2871,22 +2693,6 @@ public List<SQLDefaultConstraint> 
getDefaultConstraints(DefaultConstraintsReques
     return sharedCache.listCachedDefaultConstraint(catName, dbName, tblName);
   }
 
-  /**
-   * Get check Constraints from cache Store if not present then fetch from raw 
store
-   * @param catName catalog name
-   * @param dbName database name
-   * @param tblName table name
-   * @return list of check constraints for given table
-   * @throws MetaException
-   */
-  @Override
-  @Deprecated
-  public List<SQLCheckConstraint> getCheckConstraints(String catName, String 
dbName, String tblName)
-      throws MetaException {
-    CheckConstraintsRequest request = new CheckConstraintsRequest(catName, 
dbName, tblName);
-    return getCheckConstraints(request);
-  }
-
   @Override
   public List<SQLCheckConstraint> getCheckConstraints(CheckConstraintsRequest 
request) throws MetaException {
     String catName = StringUtils.normalizeIdentifier(request.getCatName());
@@ -2898,22 +2704,6 @@ public List<SQLCheckConstraint> 
getCheckConstraints(CheckConstraintsRequest requ
     return sharedCache.listCachedCheckConstraint(catName, dbName, tblName);
   }
 
-  /**
-   * Method to fetch all table constraints at once
-   * @param catName catalog name
-   * @param dbName database name
-   * @param tblName table name
-   * @return list of all table constraints
-   * @throws MetaException
-   */
-  @Override
-  @Deprecated
-  public SQLAllTableConstraints getAllTableConstraints(String catName, String 
dbName, String tblName)
-      throws MetaException, NoSuchObjectException {
-    AllTableConstraintsRequest request = new 
AllTableConstraintsRequest(dbName,tblName,catName);
-    return getAllTableConstraints(request);
-  }
-
   @Override
   public SQLAllTableConstraints 
getAllTableConstraints(AllTableConstraintsRequest request)
       throws MetaException, NoSuchObjectException {
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java
index 6eb6df1d6d7..627e10ade3f 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/client/builder/GetPartitionsArgs.java
@@ -182,4 +182,8 @@ public GetPartitionsArgs build() {
       return additionalArgs;
     }
   }
+
+  public static GetPartitionsArgs getAllPartitions() {
+    return new GetPartitionsArgsBuilder().max(-1).build();
+  }
 }
diff --git 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java
 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java
index 6606634a116..84fee0c8fd8 100644
--- 
a/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java
+++ 
b/standalone-metastore/metastore-server/src/main/java/org/apache/hadoop/hive/metastore/utils/MetaStoreServerUtils.java
@@ -997,10 +997,6 @@ public static int findFreePortExcepting(int portToExclude) 
throws IOException {
     }
   }
 
-  public static String getIndexTableName(String dbName, String baseTblName, 
String indexName) {
-    return dbName + "__" + baseTblName + "_" + indexName + "__";
-  }
-
   static public String validateTblColumns(List<FieldSchema> cols) {
     for (FieldSchema fieldSchema : cols) {
       // skip this, as validateColumnName always returns true
diff --git 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
index ae96fdaf67b..07af321300e 100644
--- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
+++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestObjectStore.java
@@ -37,7 +37,7 @@
 import org.apache.hadoop.hive.metastore.api.AggrStats;
 import org.apache.hadoop.hive.metastore.api.DropPackageRequest;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.Function;
+import org.apache.hadoop.hive.metastore.api.ForeignKeysRequest;
 import org.apache.hadoop.hive.metastore.api.GetPackageRequest;
 import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege;
 import org.apache.hadoop.hive.metastore.api.HiveObjectRef;
@@ -52,8 +52,7 @@
 import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
 import org.apache.hadoop.hive.metastore.api.Package;
 import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionListComposingSpec;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
+import org.apache.hadoop.hive.metastore.api.PrimaryKeysRequest;
 import org.apache.hadoop.hive.metastore.api.PrincipalType;
 import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo;
@@ -81,7 +80,6 @@
 import org.apache.hadoop.hive.metastore.model.MNotificationLog;
 import org.apache.hadoop.hive.metastore.model.MNotificationNextId;
 import org.apache.hadoop.hive.metastore.model.MTable;
-import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy;
 import org.apache.hadoop.hive.metastore.utils.MetaStoreServerUtils;
 import org.junit.Assert;
 import org.junit.Assume;
@@ -106,12 +104,10 @@
 import java.util.Arrays;
 import java.util.Collections;
 import java.util.HashMap;
-import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
-import java.util.Set;
 import java.util.UUID;
 import java.util.concurrent.BrokenBarrierException;
 import java.util.concurrent.Callable;
@@ -388,7 +384,7 @@ public void testTableOps() throws MetaException, 
InvalidObjectException, NoSuchO
     tables = objectStore.getAllTables(DEFAULT_CATALOG_NAME, DB1);
     Assert.assertEquals(2, tables.size());
 
-    List<SQLForeignKey> foreignKeys = 
objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, TABLE1, null, null);
+    List<SQLForeignKey> foreignKeys = 
objectStore.getForeignKeys(newForeignKeysRequest(DB1, TABLE1, null, null));
     Assert.assertEquals(0, foreignKeys.size());
 
     SQLPrimaryKey pk = new SQLPrimaryKey(DB1, TABLE1, "pk_col", 1,
@@ -401,10 +397,10 @@ public void testTableOps() throws MetaException, 
InvalidObjectException, NoSuchO
     objectStore.addForeignKeys(ImmutableList.of(fk));
 
     // Retrieve from PK side
-    foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, 
DB1, "new" + TABLE1);
+    foreignKeys = objectStore.getForeignKeys(newForeignKeysRequest(null, null, 
DB1, "new" + TABLE1));
     Assert.assertEquals(1, foreignKeys.size());
 
-    List<SQLForeignKey> fks = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, 
null, null, DB1, "new" + TABLE1);
+    List<SQLForeignKey> fks = 
objectStore.getForeignKeys(newForeignKeysRequest(null, null, DB1, "new" + 
TABLE1));
     if (fks != null) {
       for (SQLForeignKey fkcol : fks) {
         objectStore.dropConstraint(fkcol.getCatName(), fkcol.getFktable_db(), 
fkcol.getFktable_name(),
@@ -412,10 +408,10 @@ public void testTableOps() throws MetaException, 
InvalidObjectException, NoSuchO
       }
     }
     // Retrieve from FK side
-    foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, null, null);
+    foreignKeys = objectStore.getForeignKeys(newForeignKeysRequest(DB1, 
TABLE1, null, null));
     Assert.assertEquals(0, foreignKeys.size());
     // Retrieve from PK side
-    foreignKeys = objectStore.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, 
DB1, "new" + TABLE1);
+    foreignKeys = objectStore.getForeignKeys(newForeignKeysRequest(null, null, 
DB1, "new" + TABLE1));
     Assert.assertEquals(0, foreignKeys.size());
 
     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
@@ -429,6 +425,17 @@ public void testTableOps() throws MetaException, 
InvalidObjectException, NoSuchO
     objectStore.dropDatabase(db1.getCatalogName(), DB1);
   }
 
+  static ForeignKeysRequest newForeignKeysRequest(String parentDb, String 
parentTbl,
+      String childDb, String childTbl) {
+    ForeignKeysRequest request = new ForeignKeysRequest(parentDb, parentTbl, 
childDb, childTbl);
+    request.setCatName(DEFAULT_CATALOG_NAME);
+    return request;
+  }
+
+  static GetPartitionsArgs limitGetPartitions(int max) {
+    return new GetPartitionsArgs.GetPartitionsArgsBuilder().max(max).build();
+  }
+
   @Test (expected = NoSuchObjectException.class)
   public void testTableOpsWhenTableDoesNotExist() throws 
NoSuchObjectException, MetaException {
     List<String> colNames = Arrays.asList("c0", "c1");
@@ -482,7 +489,7 @@ public void testPartitionOps() throws Exception {
 
     List<Partition> partitions;
     try (AutoCloseable c = deadline()) {
-      partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, 10);
+      partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, limitGetPartitions(10));
     }
     Assert.assertEquals(2, partitions.size());
     Assert.assertEquals(111, partitions.get(0).getCreateTime());
@@ -512,14 +519,16 @@ public void testPartitionOps() throws Exception {
     Assert.assertEquals(2, numPartitions);
 
     try (AutoCloseable c = deadline()) {
-      objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value1);
-      partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, 10);
+      objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+          Warehouse.makePartName(tbl1.getPartitionKeys(), value1));
+      partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, limitGetPartitions(10));
     }
     Assert.assertEquals(1, partitions.size());
     Assert.assertEquals(222, partitions.get(0).getCreateTime());
 
     try (AutoCloseable c = deadline()) {
-      objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, value2);
+      objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1,
+          Warehouse.makePartName(tbl1.getPartitionKeys(), value2));
       objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
       objectStore.dropDatabase(db1.getCatalogName(), DB1);
     }
@@ -553,15 +562,6 @@ public void testPartitionOpsWhenTableDoesNotExist() throws 
InvalidObjectExceptio
       // expected
     }
 
-    PartitionSpec partitionSpec1 = new PartitionSpec(DB1, "not_existed_table", 
"location1");
-    partitionSpec1.setPartitionList(new PartitionListComposingSpec(parts));
-    PartitionSpecProxy partitionSpecProxy = 
PartitionSpecProxy.Factory.get(Arrays.asList(partitionSpec1));
-    try {
-      objectStore.addPartitions(DEFAULT_CATALOG_NAME, DB1, 
"not_existed_table", partitionSpecProxy, true);
-    } catch (InvalidObjectException e) {
-      // expected
-    }
-
     List<List<String>> part_vals = Arrays.asList(Arrays.asList("US", "GA"), 
Arrays.asList("US", "WA"));
     try {
       objectStore.alterPartitions(DEFAULT_CATALOG_NAME, DB1, 
"not_existed_table", part_vals, parts, 0, "");
@@ -678,13 +678,13 @@ public void testDropPartitionByName() throws Exception {
     List<Partition> partitions;
     try (AutoCloseable c = deadline()) {
       objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, 
"country=US/state=CA");
-      partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, 10);
+      partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, limitGetPartitions(10));
     }
     Assert.assertEquals(1, partitions.size());
     Assert.assertEquals(222, partitions.get(0).getCreateTime());
     try (AutoCloseable c = deadline()) {
       objectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, TABLE1, 
"country=US/state=MA");
-      partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, 10);
+      partitions = objectStore.getPartitions(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, limitGetPartitions(10));
     }
     Assert.assertEquals(0, partitions.size());
 
@@ -750,7 +750,8 @@ public void testConcurrentDropPartitions() throws 
MetaException, InvalidObjectEx
           threadObjectStore.setConf(conf);
           for (List<String> p : partNames) {
             try {
-              threadObjectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, 
TABLE1, p);
+              threadObjectStore.dropPartition(DEFAULT_CATALOG_NAME, DB1, 
TABLE1,
+                  Warehouse.makePartName(tbl1.getPartitionKeys(), p));
               System.out.println("Dropping partition: " + p.get(0));
             } catch (Exception e) {
               throw new RuntimeException(e);
@@ -939,7 +940,7 @@ public void testTableStatisticsOps() throws Exception {
     Assert.assertEquals(1, tabColStats.size());
     Assert.assertEquals(2, tabColStats.get(0).getStatsObjSize());
 
-    objectStore.deleteTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1, 
"test_col1", ENGINE);
+    objectStore.deleteTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1, 
Arrays.asList("test_col1"), ENGINE);
     try (AutoCloseable c = deadline()) {
       tabColStats = objectStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, 
DB1, TABLE1,
           Arrays.asList("test_col1", "test_col' 2"));
@@ -947,7 +948,7 @@ public void testTableStatisticsOps() throws Exception {
     Assert.assertEquals(1, tabColStats.size());
     Assert.assertEquals(1, tabColStats.get(0).getStatsObjSize());
 
-    objectStore.deleteTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1, 
"test_col' 2", ENGINE);
+    objectStore.deleteTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1, 
Arrays.asList("test_col' 2"), ENGINE);
     try (AutoCloseable c = deadline()) {
       tabColStats = objectStore.getTableColumnStatistics(DEFAULT_CATALOG_NAME, 
DB1, TABLE1,
           Arrays.asList("test_col1", "test_col' 2"));
@@ -958,7 +959,7 @@ public void testTableStatisticsOps() throws Exception {
   @Test
   public void testDeleteTableColumnStatisticsWhenEngineHasSpecialCharacter() 
throws Exception {
     createPartitionedTable(true, true);
-    objectStore.deleteTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1, 
"test_col1", "special '");
+    objectStore.deleteTableColumnStatistics(DEFAULT_CATALOG_NAME, DB1, TABLE1, 
Arrays.asList("test_col1"), "special '");
   }
 
   @Test
@@ -983,7 +984,7 @@ public void testPartitionStatisticsOps() throws Exception {
     assertEqualStatistics(expectedStats, computedStats);
 
     objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, DB1, 
TABLE1,
-        "test_part_col=a0", Arrays.asList("a0"), null, ENGINE);
+        Arrays.asList("test_part_col=a0"), null, ENGINE);
     try (AutoCloseable c = deadline()) {
       stat = objectStore.getPartitionColumnStatistics(DEFAULT_CATALOG_NAME, 
DB1, TABLE1,
           Arrays.asList("test_part_col=a0", "test_part_col=a1", 
"test_part_col=a2"),
@@ -993,7 +994,7 @@ public void testPartitionStatisticsOps() throws Exception {
     Assert.assertEquals(2, stat.get(0).size());
 
     objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, DB1, 
TABLE1,
-        "test_part_col=a1", Arrays.asList("a1"), "test_part_col", null);
+        Arrays.asList("test_part_col=a1"), Arrays.asList("test_part_col"), 
null);
     try (AutoCloseable c = deadline()) {
       stat = objectStore.getPartitionColumnStatistics(DEFAULT_CATALOG_NAME, 
DB1, TABLE1,
           Arrays.asList("test_part_col=a0", "test_part_col=a1", 
"test_part_col=a2"),
@@ -1003,7 +1004,7 @@ public void testPartitionStatisticsOps() throws Exception 
{
     Assert.assertEquals(1, stat.get(0).size());
 
     objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, DB1, 
TABLE1,
-        "test_part_col=a2", Arrays.asList("a2"), null, null);
+        Arrays.asList("test_part_col=a2"), null, null);
     try (AutoCloseable c = deadline()) {
       stat = objectStore.getPartitionColumnStatistics(DEFAULT_CATALOG_NAME, 
DB1, TABLE1,
           Arrays.asList("test_part_col=a0", "test_part_col=a1", 
"test_part_col=a2"),
@@ -1016,7 +1017,7 @@ public void testPartitionStatisticsOps() throws Exception 
{
   public void 
testDeletePartitionColumnStatisticsWhenEngineHasSpecialCharacter() throws 
Exception {
     createPartitionedTable(true, true);
     objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, DB1, 
TABLE1,
-            "test_part_col=a2", List.of("a2"), null, "special '");
+            List.of("test_part_col=a2"), null, "special '");
   }
 
 
@@ -1259,54 +1260,6 @@ public void 
testLockDbTableThrowsExceptionWhenTableIsNotAllowedToLock() throws E
     metaStoreDirectSql.lockDbTable("TBLS");
   }
 
-  @Deprecated
-  private static void dropAllStoreObjects(RawStore store)
-      throws MetaException, InvalidObjectException, InvalidInputException {
-    try {
-      List<Function> functions = store.getAllFunctions(DEFAULT_CATALOG_NAME);
-      for (Function func : functions) {
-        store.dropFunction(DEFAULT_CATALOG_NAME, func.getDbName(), 
func.getFunctionName());
-      }
-      for (String catName : store.getCatalogs()) {
-        List<String> dbs = store.getAllDatabases(catName);
-        for (String db : dbs) {
-          List<String> tbls = store.getAllTables(DEFAULT_CATALOG_NAME, db);
-          for (String tbl : tbls) {
-            List<Partition> parts = store.getPartitions(DEFAULT_CATALOG_NAME, 
db, tbl, 100);
-            for (Partition part : parts) {
-              store.dropPartition(DEFAULT_CATALOG_NAME, db, tbl, 
part.getValues());
-            }
-            // Find any constraints and drop them
-            Set<String> constraints = new HashSet<>();
-            List<SQLPrimaryKey> pk = 
store.getPrimaryKeys(DEFAULT_CATALOG_NAME, db, tbl);
-            if (pk != null) {
-              for (SQLPrimaryKey pkcol : pk) {
-                constraints.add(pkcol.getPk_name());
-              }
-            }
-            List<SQLForeignKey> fks = 
store.getForeignKeys(DEFAULT_CATALOG_NAME, null, null, db, tbl);
-            if (fks != null) {
-              for (SQLForeignKey fkcol : fks) {
-                constraints.add(fkcol.getFk_name());
-              }
-            }
-            for (String constraint : constraints) {
-              store.dropConstraint(DEFAULT_CATALOG_NAME, db, tbl, constraint);
-            }
-            store.dropTable(DEFAULT_CATALOG_NAME, db, tbl);
-          }
-          store.dropDatabase(catName, db);
-        }
-        store.dropCatalog(catName);
-      }
-      List<String> roles = store.listRoleNames();
-      for (String role : roles) {
-        store.removeRole(role);
-      }
-    } catch (NoSuchObjectException e) {
-    }
-  }
-
   @Test
   public void testQueryCloseOnError() throws Exception {
     ObjectStore spy = Mockito.spy(objectStore);
@@ -1612,8 +1565,10 @@ public void testGetPrimaryKeys() throws Exception {
     objectStore.addPrimaryKeys(ImmutableList.of(pk));
 
     // Primary key retrieval should be success, even if db_name isn't 
specified.
+    PrimaryKeysRequest request = new PrimaryKeysRequest(null, TABLE1);
+    request.setCatName(DEFAULT_CATALOG_NAME);
     assertEquals("pk_col",
-        objectStore.getPrimaryKeys(DEFAULT_CATALOG_NAME, null, TABLE1).get(0)
+        objectStore.getPrimaryKeys(request).get(0)
             .getColumn_name());
     objectStore.dropTable(DEFAULT_CATALOG_NAME, DB1, TABLE1);
     objectStore.dropDatabase(db1.getCatalogName(), DB1);
diff --git 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
index 4b5e1813f0b..9b96f15d29e 100644
--- 
a/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
+++ 
b/standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/cache/TestCachedStore.java
@@ -37,6 +37,7 @@
 import org.apache.hadoop.hive.metastore.annotation.MetastoreCheckinTest;
 import org.apache.hadoop.hive.metastore.api.*;
 import org.apache.hadoop.hive.metastore.client.builder.DatabaseBuilder;
+import org.apache.hadoop.hive.metastore.client.builder.GetPartitionsArgs;
 import org.apache.hadoop.hive.metastore.columnstats.ColStatsBuilder;
 import 
org.apache.hadoop.hive.metastore.columnstats.cache.LongColumnStatsDataInspector;
 import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
@@ -162,19 +163,23 @@
     Assert.assertTrue(db2Tables.contains(db2Ptbl1.getTableName()));
     // cs_db1_ptntbl1
     List<Partition> db1Ptbl1Partitions =
-        cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), 
db1Ptbl1.getTableName(), -1);
+        cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), 
db1Ptbl1.getTableName(),
+            GetPartitionsArgs.getAllPartitions());
     Assert.assertEquals(25, db1Ptbl1Partitions.size());
     Deadline.startTimer("");
     List<Partition> db1Ptbl1PartitionsOS =
-        objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db1Ptbl1.getTableName(), -1);
+        objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db1Ptbl1.getTableName(),
+            GetPartitionsArgs.getAllPartitions());
     Assert.assertTrue(db1Ptbl1Partitions.containsAll(db1Ptbl1PartitionsOS));
     // cs_db2_ptntbl1
     List<Partition> db2Ptbl1Partitions =
-        cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(), -1);
+        cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(),
+            GetPartitionsArgs.getAllPartitions());
     Assert.assertEquals(25, db2Ptbl1Partitions.size());
     Deadline.startTimer("");
     List<Partition> db2Ptbl1PartitionsOS =
-        objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(), -1);
+        objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(),
+            GetPartitionsArgs.getAllPartitions());
     Assert.assertTrue(db2Ptbl1Partitions.containsAll(db2Ptbl1PartitionsOS));
     cachedStore.shutdown();
   }
@@ -276,7 +281,8 @@ public void testPrewarmMemoryEstimation() {
     Assert.assertEquals(1, db1Tbls.size());
     Assert.assertTrue(db1Tbls.contains(db1Ptbl1.getTableName()));
     List<Partition> db1Ptns =
-        cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), 
db1Ptbl1.getTableName(), -1);
+        cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), 
db1Ptbl1.getTableName(),
+            GetPartitionsArgs.getAllPartitions());
     Assert.assertEquals(0, db1Ptns.size());
     // cs_db2_ptntbl1
     List<String> db2Tbls = cachedStore.getAllTables(DEFAULT_CATALOG_NAME, 
db2.getName());
@@ -284,11 +290,13 @@ public void testPrewarmMemoryEstimation() {
     Assert.assertTrue(db2Tbls.contains(db2Utbl1.getTableName()));
     Assert.assertTrue(db2Tbls.contains(db2Ptbl1.getTableName()));
     List<Partition> db2Ptns =
-        cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(), -1);
+        cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(),
+            GetPartitionsArgs.getAllPartitions());
     Assert.assertEquals(25, db2Ptns.size());
     Deadline.startTimer("");
     List<Partition> db2PtnsOS =
-        objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(), -1);
+        objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(),
+            GetPartitionsArgs.getAllPartitions());
     Assert.assertTrue(db2Ptns.containsAll(db2PtnsOS));
     // Create a new unpartitioned table under basedb1
     Table db1Utbl2 = createUnpartitionedTableObject(db1);
@@ -317,13 +325,16 @@ public void testPrewarmMemoryEstimation() {
     Assert.assertTrue(db2Tbls.contains(db2Utbl1.getTableName()));
     Assert.assertTrue(db2Tbls.contains(db2Ptbl1.getTableName()));
     // cs_db1_ptntbl1
-    db1Ptns = cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), 
db1Ptbl1.getTableName(), -1);
+    db1Ptns = cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db1.getName(), 
db1Ptbl1.getTableName(),
+        GetPartitionsArgs.getAllPartitions());
     Assert.assertEquals(5, db1Ptns.size());
     // cs_db2_ptntbl1
-    db2Ptns = cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(), -1);
+    db2Ptns = cachedStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(),
+        GetPartitionsArgs.getAllPartitions());
     Assert.assertEquals(25, db2Ptns.size());
     Deadline.startTimer("");
-    db2PtnsOS = objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(), -1);
+    db2PtnsOS = objectStore.getPartitions(DEFAULT_CATALOG_NAME, db2.getName(), 
db2Ptbl1.getTableName(),
+        GetPartitionsArgs.getAllPartitions());
     Assert.assertTrue(db2Ptns.containsAll(db2PtnsOS));
     // Clean up
     objectStore.dropTable(DEFAULT_CATALOG_NAME, db1Utbl2.getDbName(), 
db1Utbl2.getTableName());
@@ -1029,11 +1040,15 @@ public void testAggrStatsRepeatedRead() throws 
Exception {
     Assert.assertEquals(100, 
aggrStats.getColStats().get(0).getStatsData().getLongStats().getNumNulls());
 
     objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, 
db.getName(), tbl.getTableName(),
-        Warehouse.makePartName(tbl.getPartitionKeys(), partVals1), partVals1, 
colName, CacheUtils.HIVE_ENGINE);
+        Arrays.asList(Warehouse.makePartName(tbl.getPartitionKeys(), 
partVals1)), Arrays.asList(colName),
+        CacheUtils.HIVE_ENGINE);
     objectStore.deletePartitionColumnStatistics(DEFAULT_CATALOG_NAME, 
db.getName(), tbl.getTableName(),
-        Warehouse.makePartName(tbl.getPartitionKeys(), partVals2), partVals2, 
colName, CacheUtils.HIVE_ENGINE);
-    objectStore.dropPartition(DEFAULT_CATALOG_NAME, db.getName(), 
tbl.getTableName(), partVals1);
-    objectStore.dropPartition(DEFAULT_CATALOG_NAME, db.getName(), 
tbl.getTableName(), partVals2);
+        Arrays.asList(Warehouse.makePartName(tbl.getPartitionKeys(), 
partVals2)), Arrays.asList(colName),
+        CacheUtils.HIVE_ENGINE);
+    objectStore.dropPartition(DEFAULT_CATALOG_NAME, db.getName(), 
tbl.getTableName(),
+        Warehouse.makePartName(tbl.getPartitionKeys(), partVals1));
+    objectStore.dropPartition(DEFAULT_CATALOG_NAME, db.getName(), 
tbl.getTableName(),
+        Warehouse.makePartName(tbl.getPartitionKeys(), partVals2));
     objectStore.dropTable(DEFAULT_CATALOG_NAME, db.getName(), 
tbl.getTableName());
     objectStore.dropDatabase(DEFAULT_CATALOG_NAME, db.getName());
     cachedStore.shutdown();

Reply via email to