http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/compat-hive-1/java/org/apache/impala/compat/MetastoreShim.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-hive-1/java/org/apache/impala/compat/MetastoreShim.java b/fe/src/compat-hive-1/java/org/apache/impala/compat/MetastoreShim.java
new file mode 100644
index 0000000..d0cd351
--- /dev/null
+++ b/fe/src/compat-hive-1/java/org/apache/impala/compat/MetastoreShim.java
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.compat;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hive.service.cli.thrift.TGetColumnsReq;
+import org.apache.hive.service.cli.thrift.TGetFunctionsReq;
+import org.apache.hive.service.cli.thrift.TGetSchemasReq;
+import org.apache.hive.service.cli.thrift.TGetTablesReq;
+import org.apache.impala.authorization.User;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.service.Frontend;
+import org.apache.impala.service.MetadataOp;
+import org.apache.impala.thrift.TMetadataOpRequest;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.thrift.TException;
+
+/**
+ * A wrapper around some of Hive's Metastore APIs to abstract away differences
+ * between major versions of Hive. This implements the shimmed methods for Hive 1.
+ */
+public class MetastoreShim {
+  /**
+   * Wrapper around MetaStoreUtils.validateName() to deal with added arguments.
+   */
+  public static boolean validateName(String name) {
+    return MetaStoreUtils.validateName(name);
+  }
+
+  /**
+   * Wrapper around IMetaStoreClient.alter_partition() to deal with added
+   * arguments.
+   */
+  public static void alterPartition(IMetaStoreClient client, Partition partition)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partition(partition.getDbName(), partition.getTableName(), partition);
+  }
+
+  /**
+   * Wrapper around IMetaStoreClient.alter_partitions() to deal with added
+   * arguments.
+   */
+  public static void alterPartitions(IMetaStoreClient client, String dbName,
+      String tableName, List<Partition> partitions)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partitions(dbName, tableName, partitions);
+  }
+
+  /**
+   * Wrapper around MetaStoreUtils.updatePartitionStatsFast() to deal with added
+   * arguments.
+   */
+  public static void updatePartitionStatsFast(Partition partition, Warehouse warehouse)
+      throws MetaException {
+    MetaStoreUtils.updatePartitionStatsFast(partition, warehouse);
+  }
+
+  /**
+   * Return the maximum number of Metastore objects that should be retrieved in
+   * a batch.
+   */
+  public static String metastoreBatchRetrieveObjectsMaxConfigKey() {
+    return HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX.toString();
+  }
+
+  /**
+   * Return the key and value that should be set in the partition parameters to
+   * mark that the stats were generated automatically by a stats task.
+   */
+  public static Pair<String, String> statsGeneratedViaStatsTaskParam() {
+    return Pair.create(
+        StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, StatsSetupConst.TRUE);
+  }
+
+  public static TResultSet execGetFunctions(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetFunctionsReq req = request.getGet_functions_req();
+    return MetadataOp.getFunctions(
+        frontend, req.getCatalogName(), req.getSchemaName(), req.getFunctionName(), user);
+  }
+
+  public static TResultSet execGetColumns(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetColumnsReq req = request.getGet_columns_req();
+    return MetadataOp.getColumns(frontend, req.getCatalogName(), req.getSchemaName(),
+        req.getTableName(), req.getColumnName(), user);
+  }
+
+  public static TResultSet execGetTables(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetTablesReq req = request.getGet_tables_req();
+    return MetadataOp.getTables(frontend, req.getCatalogName(), req.getSchemaName(),
+        req.getTableName(), req.getTableTypes(), user);
+  }
+
+  public static TResultSet execGetSchemas(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetSchemasReq req = request.getGet_schemas_req();
+    return MetadataOp.getSchemas(
+        frontend, req.getCatalogName(), req.getSchemaName(), user);
+  }
+}

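The shim keeps version-sensitive call sites identical across Hive releases: callers depend only on org.apache.impala.compat.MetastoreShim, and the build presumably compiles in whichever compat-hive-* source tree matches the Hive version in use. A minimal caller sketch, assuming the Impala frontend classes are on the classpath; the class name and identifier below are illustrative, not part of the patch:

    import org.apache.impala.compat.MetastoreShim;

    public class NameCheckExample {
      public static void main(String[] args) {
        // On Hive 1 this delegates to MetaStoreUtils.validateName(name); on
        // Hive 2, to MetaStoreUtils.validateName(name, null). Callers never
        // see the difference.
        String candidate = "my_column";  // hypothetical identifier
        if (!MetastoreShim.validateName(candidate)) {
          throw new IllegalArgumentException("Invalid name: " + candidate);
        }
      }
    }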
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/compat-hive-2/java/org/apache/impala/compat/MetastoreShim.java
----------------------------------------------------------------------
diff --git a/fe/src/compat-hive-2/java/org/apache/impala/compat/MetastoreShim.java b/fe/src/compat-hive-2/java/org/apache/impala/compat/MetastoreShim.java
new file mode 100644
index 0000000..3d69545
--- /dev/null
+++ b/fe/src/compat-hive-2/java/org/apache/impala/compat/MetastoreShim.java
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.impala.compat;
+
+import java.util.List;
+
+import org.apache.hadoop.hive.common.StatsSetupConst;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+import org.apache.hadoop.hive.metastore.MetaStoreUtils;
+import org.apache.hadoop.hive.metastore.Warehouse;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
+import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
+import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
+import org.apache.hive.service.rpc.thrift.TGetTablesReq;
+import org.apache.impala.authorization.User;
+import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.Pair;
+import org.apache.impala.service.Frontend;
+import org.apache.impala.service.MetadataOp;
+import org.apache.impala.thrift.TMetadataOpRequest;
+import org.apache.impala.thrift.TResultSet;
+import org.apache.thrift.TException;
+
+/**
+ * A wrapper around some of Hive's Metastore APIs to abstract away differences
+ * between major versions of Hive. This implements the shimmed methods for Hive 2.
+ */
+public class MetastoreShim {
+  /**
+   * Wrapper around MetaStoreUtils.validateName() to deal with added arguments.
+   */
+  public static boolean validateName(String name) {
+    return MetaStoreUtils.validateName(name, null);
+  }
+
+  /**
+   * Wrapper around IMetaStoreClient.alter_partition() to deal with added
+   * arguments.
+   */
+  public static void alterPartition(IMetaStoreClient client, Partition partition)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partition(
+        partition.getDbName(), partition.getTableName(), partition, null);
+  }
+
+  /**
+   * Wrapper around IMetaStoreClient.alter_partitions() to deal with added
+   * arguments.
+   */
+  public static void alterPartitions(IMetaStoreClient client, String dbName,
+      String tableName, List<Partition> partitions)
+      throws InvalidOperationException, MetaException, TException {
+    client.alter_partitions(dbName, tableName, partitions, null);
+  }
+
+  /**
+   * Wrapper around MetaStoreUtils.updatePartitionStatsFast() to deal with added
+   * arguments.
+   */
+  public static void updatePartitionStatsFast(Partition partition, Warehouse warehouse)
+      throws MetaException {
+    MetaStoreUtils.updatePartitionStatsFast(partition, warehouse, null);
+  }
+
+  /**
+   * Return the maximum number of Metastore objects that should be retrieved in
+   * a batch.
+   */
+  public static String metastoreBatchRetrieveObjectsMaxConfigKey() {
+    return HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX.toString();
+  }
+
+  /**
+   * Return the key and value that should be set in the partition parameters to
+   * mark that the stats were generated automatically by a stats task.
+   */
+  public static Pair<String, String> statsGeneratedViaStatsTaskParam() {
+    return Pair.create(StatsSetupConst.STATS_GENERATED, StatsSetupConst.TASK);
+  }
+
+  public static TResultSet execGetFunctions(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetFunctionsReq req = request.getGet_functions_req();
+    return MetadataOp.getFunctions(
+        frontend, req.getCatalogName(), req.getSchemaName(), req.getFunctionName(), user);
+  }
+
+  public static TResultSet execGetColumns(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetColumnsReq req = request.getGet_columns_req();
+    return MetadataOp.getColumns(frontend, req.getCatalogName(), req.getSchemaName(),
+        req.getTableName(), req.getColumnName(), user);
+  }
+
+  public static TResultSet execGetTables(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetTablesReq req = request.getGet_tables_req();
+    return MetadataOp.getTables(frontend, req.getCatalogName(), req.getSchemaName(),
+        req.getTableName(), req.getTableTypes(), user);
+  }
+
+  public static TResultSet execGetSchemas(
+      Frontend frontend, TMetadataOpRequest request, User user) throws ImpalaException {
+    TGetSchemasReq req = request.getGet_schemas_req();
+    return MetadataOp.getSchemas(
+        frontend, req.getCatalogName(), req.getSchemaName(), user);
+  }
+}

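Read side by side, the two shims isolate every per-version difference the rest of the frontend would otherwise have to switch on: Hive 2's alter_partition(s) and updatePartitionStatsFast take an extra argument (passed as null here), the HiveServer2 thrift request classes moved from org.apache.hive.service.cli.thrift to org.apache.hive.service.rpc.thrift, the batch-retrieve config key changed, and the stats-task marker changed from STATS_GENERATED_VIA_STATS_TASK=true to STATS_GENERATED=TASK. A sketch of version-neutral use of the config-key wrapper, mirroring the MetaStoreUtil change later in this patch (the class name is illustrative):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.impala.compat.MetastoreShim;

    public class BatchSizeExample {
      public static void main(String[] args) {
        // Resolves to METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX on Hive 1
        // and METASTORE_BATCH_RETRIEVE_OBJECTS_MAX on Hive 2.
        String key = MetastoreShim.metastoreBatchRetrieveObjectsMaxConfigKey();
        HiveConf conf = new HiveConf();
        System.out.println(key + " = " + conf.get(key));  // value may be null
      }
    }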
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
index 42304e6..57e4b51 100644
--- a/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/ColumnDef.java
@@ -33,6 +33,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
+import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.thrift.TColumn;
 import org.apache.impala.util.KuduUtil;
 import org.apache.impala.util.MetaStoreUtil;
@@ -175,7 +176,7 @@ public class ColumnDef {
 
   public void analyze(Analyzer analyzer) throws AnalysisException {
     // Check whether the column name meets the Metastore's requirements.
-    if (!MetaStoreUtils.validateName(colName_)) {
+    if (!MetastoreShim.validateName(colName_)) {
       throw new AnalysisException("Invalid column/field name: " + colName_);
     }
     if (typeDef_ != null) {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
index 30ca223..8ef0737 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDataSrcStmt.java
@@ -18,13 +18,13 @@
 package org.apache.impala.analysis;
 
 import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.common.AnalysisException;
+import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.extdatasource.ApiVersion;
 import org.apache.impala.thrift.TCreateDataSourceParams;
 import org.apache.impala.thrift.TDataSource;
+
 import com.google.common.base.Joiner;
 import com.google.common.base.Preconditions;
 
@@ -54,7 +54,7 @@ public class CreateDataSrcStmt extends StatementBase {
 
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (!MetaStoreUtils.validateName(dataSrcName_)) {
+    if (!MetastoreShim.validateName(dataSrcName_)) {
       throw new AnalysisException("Invalid data source name: " + dataSrcName_);
     }
     if (!ifNotExists_ && analyzer.getCatalog().getDataSource(dataSrcName_) != null) {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java b/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
index 9b2fd10..9ca14f4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/CreateDbStmt.java
@@ -23,6 +23,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.impala.authorization.Privilege;
 import org.apache.impala.catalog.Db;
 import org.apache.impala.common.AnalysisException;
+import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.thrift.TCreateDbParams;
 
 /**
@@ -81,7 +82,7 @@ public class CreateDbStmt extends StatementBase {
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
     // Check whether the db name meets the Metastore's requirements.
-    if (!MetaStoreUtils.validateName(dbName_)) {
+    if (!MetastoreShim.validateName(dbName_)) {
       throw new AnalysisException("Invalid database name: " + dbName_);
     }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java b/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
index e3dd9a8..32501c4 100644
--- a/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
+++ b/fe/src/main/java/org/apache/impala/analysis/DropDataSrcStmt.java
@@ -17,10 +17,10 @@
 
 package org.apache.impala.analysis;
 
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-
 import org.apache.impala.common.AnalysisException;
+import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.thrift.TDropDataSourceParams;
+
 import com.google.common.base.Preconditions;
 
 /**
@@ -39,10 +39,10 @@ public class DropDataSrcStmt extends StatementBase {
 
   @Override
   public void analyze(Analyzer analyzer) throws AnalysisException {
-    if (!MetaStoreUtils.validateName(dataSrcName_) ||
+    if (!MetastoreShim.validateName(dataSrcName_) ||
         (!ifExists_ && analyzer.getCatalog().getDataSource(dataSrcName_) == null)) {
-      throw new AnalysisException(Analyzer.DATA_SRC_DOES_NOT_EXIST_ERROR_MSG +
-          dataSrcName_);
+      throw new AnalysisException(
+          Analyzer.DATA_SRC_DOES_NOT_EXIST_ERROR_MSG + dataSrcName_);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/analysis/Subquery.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/Subquery.java b/fe/src/main/java/org/apache/impala/analysis/Subquery.java
index cc2c075..1626d18 100644
--- a/fe/src/main/java/org/apache/impala/analysis/Subquery.java
+++ b/fe/src/main/java/org/apache/impala/analysis/Subquery.java
@@ -20,11 +20,11 @@ package org.apache.impala.analysis;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.impala.catalog.ArrayType;
 import org.apache.impala.catalog.StructField;
 import org.apache.impala.catalog.StructType;
 import org.apache.impala.common.AnalysisException;
+import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.thrift.TExprNode;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -128,7 +128,7 @@ public class Subquery extends Expr {
       Expr expr = stmtResultExprs.get(i);
       String fieldName = null;
       // Check if the label meets the Metastore's requirements.
-      if (MetaStoreUtils.validateName(labels.get(i))) {
+      if (MetastoreShim.validateName(labels.get(i))) {
         fieldName = labels.get(i);
         // Make sure the field names are unique.
         if (!hasUniqueLabels) {

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/analysis/TableName.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TableName.java b/fe/src/main/java/org/apache/impala/analysis/TableName.java
index 297948e..4ebef4c 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TableName.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TableName.java
@@ -19,10 +19,10 @@ package org.apache.impala.analysis;
 
 import java.util.List;
 
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-
 import org.apache.impala.common.AnalysisException;
+import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.thrift.TTableName;
+
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
@@ -54,12 +54,12 @@ public class TableName {
    */
   public void analyze() throws AnalysisException {
     if (db_ != null) {
-      if (!MetaStoreUtils.validateName(db_)) {
+      if (!MetastoreShim.validateName(db_)) {
         throw new AnalysisException("Invalid database name: " + db_);
       }
     }
     Preconditions.checkNotNull(tbl_);
-    if (!MetaStoreUtils.validateName(tbl_)) {
+    if (!MetastoreShim.validateName(tbl_)) {
       throw new AnalysisException("Invalid table/view name: " + tbl_);
     }
   }
@@ -120,4 +120,4 @@ public class TableName {
   public int hashCode() {
     return toString().toLowerCase().hashCode();
   }
-}
\ No newline at end of file
+}

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/analysis/TypeDef.java b/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
index 76e6a8f..f2bf215 100644
--- a/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
+++ b/fe/src/main/java/org/apache/impala/analysis/TypeDef.java
@@ -19,8 +19,6 @@ package org.apache.impala.analysis;
 
 import java.util.Set;
 
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-
 import org.apache.impala.catalog.ArrayType;
 import org.apache.impala.catalog.MapType;
 import org.apache.impala.catalog.PrimitiveType;
@@ -29,6 +27,8 @@ import org.apache.impala.catalog.StructField;
 import org.apache.impala.catalog.StructType;
 import org.apache.impala.catalog.Type;
 import org.apache.impala.common.AnalysisException;
+import org.apache.impala.compat.MetastoreShim;
+
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Sets;
 
@@ -128,12 +128,11 @@ public class TypeDef implements ParseNode {
     for (StructField f: structType.getFields()) {
       analyze(f.getType(), analyzer);
       if (!fieldNames.add(f.getName().toLowerCase())) {
-        throw new AnalysisException(
-            String.format("Duplicate field name '%s' in struct '%s'",
-                f.getName(), toSql()));
+        throw new AnalysisException(String.format(
+            "Duplicate field name '%s' in struct '%s'", f.getName(), toSql()));
       }
       // Check whether the column name meets the Metastore's requirements.
-      if (!MetaStoreUtils.validateName(f.getName().toLowerCase())) {
+      if (!MetastoreShim.validateName(f.getName().toLowerCase())) {
         throw new AnalysisException("Invalid struct field name: " + 
f.getName());
       }
     }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
index 5cde346..de1a948 100644
--- a/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
+++ b/fe/src/main/java/org/apache/impala/catalog/HdfsPartition.java
@@ -36,6 +36,7 @@ import org.apache.impala.analysis.PartitionKeyValue;
 import org.apache.impala.analysis.ToSqlUtils;
 import org.apache.impala.common.FileSystemUtil;
 import org.apache.impala.common.ImpalaException;
+import org.apache.impala.common.Pair;
 import org.apache.impala.thrift.ImpalaInternalServiceConstants;
 import org.apache.impala.thrift.TAccessLevel;
 import org.apache.impala.thrift.TExpr;
@@ -442,6 +443,9 @@ public class HdfsPartition implements Comparable<HdfsPartition> {
   public Map<String, String> getParameters() { return hmsParameters_; }
 
   public void putToParameters(String k, String v) { hmsParameters_.put(k, v); }
+  public void putToParameters(Pair<String, String> kv) {
+    putToParameters(kv.first, kv.second);
+  }
 
   /**
   * Marks this partition's metadata as "dirty" indicating that changes have been

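The new Pair overload lets call sites consume MetastoreShim.statsGeneratedViaStatsTaskParam() in one call instead of unpacking the pair themselves, as a later CatalogOpExecutor hunk in this patch does:

    // Writes the version-specific stats-task marker without naming either
    // Hive constant (taken from the CatalogOpExecutor change below).
    partition.putToParameters(MetastoreShim.statsGeneratedViaStatsTaskParam());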
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
index 5011769..9dc247d 100644
--- a/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
+++ b/fe/src/main/java/org/apache/impala/service/CatalogOpExecutor.java
@@ -93,6 +93,7 @@ import org.apache.impala.common.ImpalaRuntimeException;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.common.Pair;
 import org.apache.impala.common.Reference;
+import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.thrift.ImpalaInternalServiceConstants;
 import org.apache.impala.thrift.JniCatalogConstants;
 import org.apache.impala.thrift.TAlterTableAddDropRangePartitionParams;
@@ -809,8 +810,7 @@ public class CatalogOpExecutor {
       }
       PartitionStatsUtil.partStatsToParameters(partitionStats, partition);
       partition.putToParameters(StatsSetupConst.ROW_COUNT, String.valueOf(numRows));
-      partition.putToParameters(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,
-          StatsSetupConst.TRUE);
+      partition.putToParameters(MetastoreShim.statsGeneratedViaStatsTaskParam());
       ++numTargetedPartitions;
       modifiedParts.add(partition);
     }
@@ -828,8 +828,8 @@ public class CatalogOpExecutor {
     // Update the table's ROW_COUNT parameter.
     msTbl.putToParameters(StatsSetupConst.ROW_COUNT,
         String.valueOf(params.getTable_stats().num_rows));
-    msTbl.putToParameters(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK,
-        StatsSetupConst.TRUE);
+    Pair<String, String> statsTaskParam = 
MetastoreShim.statsGeneratedViaStatsTaskParam();
+    msTbl.putToParameters(statsTaskParam.first, statsTaskParam.second);
     return numTargetedPartitions;
   }
 
@@ -2655,7 +2655,7 @@ public class CatalogOpExecutor {
             cacheIds.add(id);
           }
           // Update the partition metadata to include the cache directive id.
-          msClient.getHiveClient().alter_partitions(tableName.getDb(),
+          MetastoreShim.alterPartitions(msClient.getHiveClient(), tableName.getDb(),
               tableName.getTbl(), hmsAddedPartitions);
         }
         updateLastDdlTime(msTbl, msClient);
@@ -2807,8 +2807,8 @@ public class CatalogOpExecutor {
       MetaStoreClient msClient, TableName tableName, List<Partition> hmsPartitions)
       throws ImpalaException {
     try {
-      msClient.getHiveClient().alter_partitions(tableName.getDb(), tableName.getTbl(),
-          hmsPartitions);
+      MetastoreShim.alterPartitions(
+          msClient.getHiveClient(), tableName.getDb(), tableName.getTbl(), hmsPartitions);
       updateLastDdlTime(msTbl, msClient);
     } catch (TException e) {
       throw new ImpalaRuntimeException(
@@ -2962,11 +2962,11 @@ public class CatalogOpExecutor {
             Math.min(i + MAX_PARTITION_UPDATES_PER_RPC, hmsPartitions.size());
         try {
           // Alter partitions in bulk.
-          msClient.getHiveClient().alter_partitions(dbName, tableName,
+          MetastoreShim.alterPartitions(msClient.getHiveClient(), dbName, tableName,
               hmsPartitions.subList(i, endPartitionIndex));
           // Mark the corresponding HdfsPartition objects as dirty
           for (org.apache.hadoop.hive.metastore.api.Partition msPartition:
-               hmsPartitions.subList(i, endPartitionIndex)) {
+              hmsPartitions.subList(i, endPartitionIndex)) {
             try {
              catalog_.getHdfsPartition(dbName, tableName, msPartition).markDirty();
             } catch (PartitionNotFoundException e) {
@@ -3221,7 +3221,7 @@ public class CatalogOpExecutor {
               partition.getSd().setSerdeInfo(msTbl.getSd().getSerdeInfo().deepCopy());
               partition.getSd().setLocation(msTbl.getSd().getLocation() + "/" +
                   partName.substring(0, partName.length() - 1));
-              MetaStoreUtils.updatePartitionStatsFast(partition, warehouse);
+              MetastoreShim.updatePartitionStatsFast(partition, warehouse);
             }
 
            // First add_partitions and then alter_partitions the successful ones with
@@ -3251,7 +3251,7 @@ public class CatalogOpExecutor {
                   }
                 }
                 try {
-                  msClient.getHiveClient().alter_partitions(tblName.getDb(),
+                  MetastoreShim.alterPartitions(msClient.getHiveClient(), tblName.getDb(),
                       tblName.getTbl(), cachedHmsParts);
                 } catch (Exception e) {
                   LOG.error("Failed in alter_partitions: ", e);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/service/Frontend.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/service/Frontend.java b/fe/src/main/java/org/apache/impala/service/Frontend.java
index 1348129..13faba5 100644
--- a/fe/src/main/java/org/apache/impala/service/Frontend.java
+++ b/fe/src/main/java/org/apache/impala/service/Frontend.java
@@ -35,10 +35,10 @@ import java.util.concurrent.atomic.AtomicReference;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hive.service.cli.thrift.TGetColumnsReq;
-import org.apache.hive.service.cli.thrift.TGetFunctionsReq;
-import org.apache.hive.service.cli.thrift.TGetSchemasReq;
-import org.apache.hive.service.cli.thrift.TGetTablesReq;
+import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
+import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
+import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
+import org.apache.hive.service.rpc.thrift.TGetTablesReq;
 import org.apache.impala.analysis.AnalysisContext;
 import org.apache.impala.analysis.CreateDataSrcStmt;
 import org.apache.impala.analysis.CreateDropRoleStmt;
@@ -87,6 +87,7 @@ import org.apache.impala.common.ImpalaException;
 import org.apache.impala.common.InternalException;
 import org.apache.impala.common.NotImplementedException;
 import org.apache.impala.planner.HdfsScanNode;
+import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.planner.PlanFragment;
 import org.apache.impala.planner.Planner;
 import org.apache.impala.planner.ScanNode;
@@ -1197,32 +1198,12 @@ public class Frontend {
         ImpalaInternalAdminUser.getInstance();
     switch (request.opcode) {
       case GET_TYPE_INFO: return MetadataOp.getTypeInfo();
-      case GET_SCHEMAS:
-      {
-        TGetSchemasReq req = request.getGet_schemas_req();
-        return MetadataOp.getSchemas(this, req.getCatalogName(),
-            req.getSchemaName(), user);
-      }
-      case GET_TABLES:
-      {
-        TGetTablesReq req = request.getGet_tables_req();
-        return MetadataOp.getTables(this, req.getCatalogName(),
-            req.getSchemaName(), req.getTableName(), req.getTableTypes(), user);
-      }
-      case GET_COLUMNS:
-      {
-        TGetColumnsReq req = request.getGet_columns_req();
-        return MetadataOp.getColumns(this, req.getCatalogName(),
-            req.getSchemaName(), req.getTableName(), req.getColumnName(), user);
-      }
+      case GET_SCHEMAS: return MetastoreShim.execGetSchemas(this, request, user);
+      case GET_TABLES: return MetastoreShim.execGetTables(this, request, user);
+      case GET_COLUMNS: return MetastoreShim.execGetColumns(this, request, user);
       case GET_CATALOGS: return MetadataOp.getCatalogs();
       case GET_TABLE_TYPES: return MetadataOp.getTableTypes();
-      case GET_FUNCTIONS:
-      {
-        TGetFunctionsReq req = request.getGet_functions_req();
-        return MetadataOp.getFunctions(this, req.getCatalogName(),
-            req.getSchemaName(), req.getFunctionName(), user);
-      }
+      case GET_FUNCTIONS: return MetastoreShim.execGetFunctions(this, request, user);
       default:
        throw new NotImplementedException(request.opcode + " has not been implemented.");
     }

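The GET_SCHEMAS, GET_TABLES, GET_COLUMNS, and GET_FUNCTIONS cases move into the shim because the TGet*Req classes they unpack live in different packages in Hive 1 (org.apache.hive.service.cli.thrift) and Hive 2 (org.apache.hive.service.rpc.thrift); had the dispatch stayed here, Frontend itself would have needed a per-Hive-version build.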
http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
----------------------------------------------------------------------
diff --git a/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java b/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
index 8c9ac24..07435ae 100644
--- a/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
+++ b/fe/src/main/java/org/apache/impala/util/MetaStoreUtil.java
@@ -24,11 +24,12 @@ import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.IMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.impala.catalog.HdfsTable;
+import org.apache.impala.common.AnalysisException;
+import org.apache.impala.compat.MetastoreShim;
 import org.apache.log4j.Logger;
 import org.apache.thrift.TException;
 
-import org.apache.impala.catalog.HdfsTable;
-import org.apache.impala.common.AnalysisException;
 import com.google.common.base.Preconditions;
 import com.google.common.collect.Lists;
 
@@ -63,8 +64,8 @@ public class MetaStoreUtil {
   static {
     // Get the value from the Hive configuration, if present.
     HiveConf hiveConf = new HiveConf(HdfsTable.class);
-    String strValue = hiveConf.get(
-        HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX.toString());
+    String strValue =
+        hiveConf.get(MetastoreShim.metastoreBatchRetrieveObjectsMaxConfigKey());
     if (strValue != null) {
       try {
         maxPartitionsPerRpc_ = Short.parseShort(strValue);

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
index dfc2580..e9d6698 100644
--- a/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/AuthorizationTest.java
@@ -30,10 +30,10 @@ import java.util.UUID;
 
 import org.apache.hadoop.conf.Configuration;
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
-import org.apache.hive.service.cli.thrift.TGetColumnsReq;
-import org.apache.hive.service.cli.thrift.TGetSchemasReq;
-import org.apache.hive.service.cli.thrift.TGetTablesReq;
 import org.apache.sentry.provider.common.ResourceAuthorizationProvider;
+import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
+import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
+import org.apache.hive.service.rpc.thrift.TGetTablesReq;
 import org.apache.sentry.provider.file.LocalGroupResourceAuthorizationProvider;
 import org.junit.After;
 import org.junit.AfterClass;

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
index d0176d2..44a543d 100644
--- a/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
+++ b/fe/src/test/java/org/apache/impala/analysis/ParserTest.java
@@ -26,10 +26,10 @@ import java.math.BigInteger;
 import java.util.ArrayList;
 import java.util.List;
 
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.impala.analysis.TimestampArithmeticExpr.TimeUnit;
 import org.apache.impala.common.AnalysisException;
 import org.apache.impala.common.FrontendTestBase;
+import org.apache.impala.compat.MetastoreShim;
 import org.apache.impala.testutil.TestUtils;
 import org.junit.Test;
 
@@ -2947,7 +2947,7 @@ public class ParserTest extends FrontendTestBase {
     // may have unquoted identifiers corresponding to keywords.
     for (String keyword: SqlScanner.keywordMap.keySet()) {
       // Skip keywords that are not valid field/column names in the Metastore.
-      if (!MetaStoreUtils.validateName(keyword)) continue;
+      if (!MetastoreShim.validateName(keyword)) continue;
       String structType = "STRUCT<" + keyword + ":INT>";
       TypeDefsParseOk(structType);
     }

http://git-wip-us.apache.org/repos/asf/incubator-impala/blob/bdad90e6/fe/src/test/java/org/apache/impala/service/FrontendTest.java
----------------------------------------------------------------------
diff --git a/fe/src/test/java/org/apache/impala/service/FrontendTest.java b/fe/src/test/java/org/apache/impala/service/FrontendTest.java
index 24056dc..dbd35b3 100644
--- a/fe/src/test/java/org/apache/impala/service/FrontendTest.java
+++ b/fe/src/test/java/org/apache/impala/service/FrontendTest.java
@@ -25,12 +25,12 @@ import java.util.List;
 import java.util.Set;
 
 import org.apache.commons.lang.exception.ExceptionUtils;
-import org.apache.hive.service.cli.thrift.TGetCatalogsReq;
-import org.apache.hive.service.cli.thrift.TGetColumnsReq;
-import org.apache.hive.service.cli.thrift.TGetFunctionsReq;
-import org.apache.hive.service.cli.thrift.TGetInfoReq;
-import org.apache.hive.service.cli.thrift.TGetSchemasReq;
-import org.apache.hive.service.cli.thrift.TGetTablesReq;
+import org.apache.hive.service.rpc.thrift.TGetCatalogsReq;
+import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
+import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
+import org.apache.hive.service.rpc.thrift.TGetInfoReq;
+import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
+import org.apache.hive.service.rpc.thrift.TGetTablesReq;
 import org.junit.Test;
 import org.apache.impala.analysis.AuthorizationTest;
 import org.apache.impala.authorization.AuthorizationConfig;

