HIVE-17982 Move metastore specific itests.  This closes #279.  (Alan Gates, reviewed by Peter Vary)


Project: http://git-wip-us.apache.org/repos/asf/hive/repo
Commit: http://git-wip-us.apache.org/repos/asf/hive/commit/d9801d9c
Tree: http://git-wip-us.apache.org/repos/asf/hive/tree/d9801d9c
Diff: http://git-wip-us.apache.org/repos/asf/hive/diff/d9801d9c

Branch: refs/heads/master
Commit: d9801d9c6c406d5871147b80bc2e0359c3dbd085
Parents: fde503d
Author: Alan Gates <ga...@hortonworks.com>
Authored: Tue Jan 16 13:36:45 2018 -0800
Committer: Alan Gates <ga...@hortonworks.com>
Committed: Tue Jan 16 13:36:45 2018 -0800

----------------------------------------------------------------------
 .../apache/hadoop/hive/metastore/FakeDerby.java |  424 ---
 .../hive/metastore/TestAcidTableSetup.java      |  245 ++
 .../hadoop/hive/metastore/TestAdminUser.java    |   45 -
 .../metastore/TestEmbeddedHiveMetaStore.java    |   54 -
 .../hadoop/hive/metastore/TestFilterHooks.java  |  280 --
 .../hive/metastore/TestHiveMetaStore.java       | 3514 ------------------
 .../hive/metastore/TestHiveMetaStoreTxns.java   |  270 --
 ...TestHiveMetaStoreWithEnvironmentContext.java |  219 --
 .../hive/metastore/TestMarkPartition.java       |  108 -
 .../hive/metastore/TestMarkPartitionRemote.java |   32 -
 .../TestMetaStoreEndFunctionListener.java       |  142 -
 .../metastore/TestMetaStoreEventListener.java   |  526 ---
 .../TestMetaStoreEventListenerOnlyOnCommit.java |  103 -
 .../metastore/TestMetaStoreInitListener.java    |   67 -
 .../metastore/TestMetaStoreListenersError.java  |   85 -
 .../metastore/TestObjectStoreInitRetry.java     |  127 -
 .../TestPartitionNameWhitelistValidation.java   |  123 -
 .../hive/metastore/TestRemoteHiveMetaStore.java |   60 -
 .../TestRemoteHiveMetaStoreIpAddress.java       |   80 -
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |   28 -
 .../hive/metastore/TestRetryingHMSHandler.java  |  123 -
 .../metastore/TestSetUGIOnBothClientServer.java |   31 -
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |   31 -
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |   31 -
 standalone-metastore/pom.xml                    |    2 +
 .../metastore/client/builder/IndexBuilder.java  |    5 +
 .../client/builder/PartitionBuilder.java        |    3 +-
 .../metastore/client/builder/TableBuilder.java  |   12 +-
 .../hive/metastore/conf/MetastoreConf.java      |    2 +-
 .../apache/hadoop/hive/metastore/FakeDerby.java |  404 ++
 .../hive/metastore/MetaStoreTestUtils.java      |   32 +-
 .../hadoop/hive/metastore/TestAdminUser.java    |   46 +
 .../metastore/TestEmbeddedHiveMetaStore.java    |   48 +
 .../hadoop/hive/metastore/TestFilterHooks.java  |  302 ++
 .../hive/metastore/TestHiveMetaStore.java       | 3071 +++++++++++++++
 .../hive/metastore/TestHiveMetaStoreTxns.java   |  264 ++
 ...TestHiveMetaStoreWithEnvironmentContext.java |  187 +
 .../hive/metastore/TestMarkPartition.java       |  117 +
 .../hive/metastore/TestMarkPartitionRemote.java |   36 +
 .../TestMetaStoreEndFunctionListener.java       |  145 +
 .../metastore/TestMetaStoreEventListener.java   |  557 +++
 .../TestMetaStoreEventListenerOnlyOnCommit.java |  123 +
 .../TestMetaStoreEventListenerWithOldConf.java  |  178 +
 .../metastore/TestMetaStoreInitListener.java    |   54 +
 .../metastore/TestMetaStoreListenersError.java  |   94 +
 .../hadoop/hive/metastore/TestObjectStore.java  |    9 +-
 .../metastore/TestObjectStoreInitRetry.java     |  132 +
 .../hadoop/hive/metastore/TestOldSchema.java    |    3 +-
 .../TestPartitionNameWhitelistValidation.java   |  122 +
 .../hive/metastore/TestRemoteHiveMetaStore.java |   62 +
 .../TestRemoteHiveMetaStoreIpAddress.java       |   64 +
 .../TestRemoteUGIHiveMetaStoreIpAddress.java    |   28 +
 .../hive/metastore/TestRetryingHMSHandler.java  |   81 +
 .../metastore/TestSetUGIOnBothClientServer.java |   31 +
 .../hive/metastore/TestSetUGIOnOnlyClient.java  |   32 +
 .../hive/metastore/TestSetUGIOnOnlyServer.java  |   32 +
 .../hive/metastore/cache/TestCachedStore.java   |    4 +-
 57 files changed, 6505 insertions(+), 6525 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
deleted file mode 100644
index 51be504..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/FakeDerby.java
+++ /dev/null
@@ -1,424 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import java.lang.Exception;
-import java.lang.Override;
-import java.lang.RuntimeException;
-import java.lang.StackTraceElement;
-import java.sql.Array;
-import java.sql.Blob;
-import java.sql.CallableStatement;
-import java.sql.Clob;
-import java.sql.Connection;
-import java.sql.DatabaseMetaData;
-import java.sql.DriverManager;
-import java.sql.DriverPropertyInfo;
-import java.sql.NClob;
-import java.sql.PreparedStatement;
-import java.sql.SQLClientInfoException;
-import java.sql.SQLException;
-import java.sql.SQLFeatureNotSupportedException;
-import java.sql.SQLWarning;
-import java.sql.SQLXML;
-import java.sql.Savepoint;
-import java.sql.Statement;
-import java.sql.Struct;
-import java.util.Map;
-import java.util.concurrent.Executor;
-import java.util.logging.Logger;
-import java.util.Properties;
-
-import javax.jdo.JDOCanRetryException;
-
-import junit.framework.TestCase;
-import org.junit.Test;
-
-import org.apache.derby.jdbc.EmbeddedDriver;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.ObjectStore;
-
-import org.apache.hadoop.hive.metastore.TestObjectStoreInitRetry;
-
-
-/**
- * Fake derby driver - companion class to enable testing by TestObjectStoreInitRetry
- */
-public class FakeDerby extends org.apache.derby.jdbc.EmbeddedDriver {
-
-  public class Connection implements java.sql.Connection {
-
-    private java.sql.Connection _baseConn;
-
-    public Connection(java.sql.Connection connection) {
-      TestObjectStoreInitRetry.debugTrace();
-      this._baseConn = connection;
-    }
-
-    @Override
-    public Statement createStatement() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createStatement();
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql);
-    }
-
-    @Override
-    public CallableStatement prepareCall(String sql) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareCall(sql);
-    }
-
-    @Override
-    public String nativeSQL(String sql) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.nativeSQL(sql);
-    }
-
-    @Override
-    public void setAutoCommit(boolean autoCommit) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      TestObjectStoreInitRetry.misbehave();
-      _baseConn.setAutoCommit(autoCommit);
-    }
-
-    @Override
-    public boolean getAutoCommit() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getAutoCommit();
-    }
-
-    @Override
-    public void commit() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.commit();
-    }
-
-    @Override
-    public void rollback() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.rollback();
-    }
-
-    @Override
-    public void close() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.close();
-    }
-
-    @Override
-    public boolean isClosed() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.isClosed();
-    }
-
-    @Override
-    public DatabaseMetaData getMetaData() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getMetaData();
-    }
-
-    @Override
-    public void setReadOnly(boolean readOnly) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setReadOnly(readOnly);
-    }
-
-    @Override
-    public boolean isReadOnly() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.isReadOnly();
-    }
-
-    @Override
-    public void setCatalog(String catalog) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setCatalog(catalog);
-    }
-
-    @Override
-    public String getCatalog() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getCatalog();
-    }
-
-    @Override
-    public void setTransactionIsolation(int level) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setTransactionIsolation(level);
-    }
-
-    @Override
-    public int getTransactionIsolation() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getTransactionIsolation();
-    }
-
-    @Override
-    public SQLWarning getWarnings() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getWarnings();
-    }
-
-    @Override
-    public void clearWarnings() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.clearWarnings();
-    }
-
-    @Override
-    public Statement createStatement(int resultSetType, int resultSetConcurrency) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createStatement(resultSetType, resultSetConcurrency);
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql, resultSetType, resultSetConcurrency);
-    }
-
-    @Override
-    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareCall(sql, resultSetType, resultSetConcurrency);
-    }
-
-    @Override
-    public Map<String, Class<?>> getTypeMap() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getTypeMap();
-    }
-
-    @Override
-    public void setTypeMap(Map<String, Class<?>> map) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setTypeMap(map);
-    }
-
-    @Override
-    public void setHoldability(int holdability) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setHoldability(holdability);
-    }
-
-    @Override
-    public int getHoldability() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getHoldability();
-    }
-
-    @Override
-    public Savepoint setSavepoint() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.setSavepoint();
-    }
-
-    @Override
-    public Savepoint setSavepoint(String name) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.setSavepoint(name);
-    }
-
-    @Override
-    public void rollback(Savepoint savepoint) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.rollback(savepoint);
-    }
-
-    @Override
-    public void releaseSavepoint(Savepoint savepoint) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.releaseSavepoint(savepoint);
-    }
-
-    @Override
-    public Statement createStatement(int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createStatement(resultSetType, resultSetConcurrency, resultSetHoldability);
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
-    }
-
-    @Override
-    public CallableStatement prepareCall(String sql, int resultSetType, int resultSetConcurrency, int resultSetHoldability) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareCall(sql, resultSetType, resultSetConcurrency, resultSetHoldability);
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql, int autoGeneratedKeys) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql, autoGeneratedKeys);
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql, int[] columnIndexes) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql, columnIndexes);
-    }
-
-    @Override
-    public PreparedStatement prepareStatement(String sql, String[] columnNames) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.prepareStatement(sql, columnNames);
-    }
-
-    @Override
-    public Clob createClob() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createClob();
-    }
-
-    @Override
-    public Blob createBlob() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createBlob();
-    }
-
-    @Override
-    public NClob createNClob() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createNClob();
-    }
-
-    @Override
-    public SQLXML createSQLXML() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createSQLXML();
-    }
-
-    @Override
-    public boolean isValid(int timeout) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.isValid(timeout);
-    }
-
-    @Override
-    public void setClientInfo(String name, String value) throws SQLClientInfoException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setClientInfo(name, value);
-    }
-
-    @Override
-    public void setClientInfo(Properties properties) throws SQLClientInfoException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setClientInfo(properties);
-    }
-
-    @Override
-    public String getClientInfo(String name) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getClientInfo(name);
-    }
-
-    @Override
-    public Properties getClientInfo() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getClientInfo();
-    }
-
-    @Override
-    public Array createArrayOf(String typeName, Object[] elements) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createArrayOf(typeName, elements);
-    }
-
-    @Override
-    public Struct createStruct(String typeName, Object[] attributes) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.createStruct(typeName, attributes);
-    }
-
-    @Override
-    public void setSchema(String schema) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setSchema(schema);
-    }
-
-    @Override
-    public String getSchema() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getSchema();
-    }
-
-    @Override
-    public void abort(Executor executor) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.abort(executor);
-    }
-
-    @Override
-    public void setNetworkTimeout(Executor executor, int milliseconds) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      _baseConn.setNetworkTimeout(executor, milliseconds);
-    }
-
-    @Override
-    public int getNetworkTimeout() throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.getNetworkTimeout();
-    }
-
-    @Override
-    public <T> T unwrap(Class<T> iface) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.unwrap(iface);
-    }
-
-    @Override
-    public boolean isWrapperFor(Class<?> iface) throws SQLException {
-      TestObjectStoreInitRetry.debugTrace();
-      return _baseConn.isWrapperFor(iface);
-    }
-  }
-
-  public FakeDerby(){
-  }
-
-  @Override
-  public boolean acceptsURL(String url) throws SQLException {
-    url = url.replace("fderby","derby");
-    return super.acceptsURL(url);
-  }
-
-  @Override
-  public Connection connect(java.lang.String url, java.util.Properties info) throws SQLException {
-    TestObjectStoreInitRetry.misbehave();
-    url = url.replace("fderby","derby");
-    return new FakeDerby.Connection(super.connect(url, info));
-  }
-
-  @Override
-  public Logger getParentLogger() throws SQLFeatureNotSupportedException {
-    throw new SQLFeatureNotSupportedException(); // hope this is respected properly
-  }
-
-
-};

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
new file mode 100644
index 0000000..62bd94a
--- /dev/null
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAcidTableSetup.java
@@ -0,0 +1,245 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.metastore;
+
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
+import org.apache.hadoop.hive.metastore.client.builder.TableBuilder;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf;
+import org.apache.hadoop.hive.metastore.conf.MetastoreConf.ConfVars;
+import org.junit.Before;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hive.metastore.api.Database;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.InvalidOperationException;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.hadoop.hive.metastore.api.Type;
+import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants;
+import org.apache.thrift.TException;
+import org.junit.Test;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+public class TestAcidTableSetup {
+  private static final Logger LOG = LoggerFactory.getLogger(TestAcidTableSetup.class);
+  protected static HiveMetaStoreClient client;
+  protected static Configuration conf;
+
+  @Before
+  public void setUp() throws Exception {
+    conf = MetastoreConf.newMetastoreConf();
+
+    MetastoreConf.setClass(conf, ConfVars.EXPRESSION_PROXY_CLASS,
+        DefaultPartitionExpressionProxy.class, PartitionExpressionProxy.class);
+    client = new HiveMetaStoreClient(conf);
+  }
+
+  @Test
+  public void testTransactionalValidation() throws Throwable {
+    String dbName = "acidDb";
+    silentDropDatabase(dbName);
+    Database db = new Database();
+    db.setName(dbName);
+    client.createDatabase(db);
+    String tblName = "acidTable";
+    Map<String, String> fields = new HashMap<>();
+    fields.put("name", ColumnType.STRING_TYPE_NAME);
+    fields.put("income", ColumnType.INT_TYPE_NAME);
+
+    Type type = createType("Person1", fields);
+
+    Map<String, String> params = new HashMap<>();
+    params.put("transactional", "");
+
+    /// CREATE TABLE scenarios
+
+    // Fail - No "transactional" property is specified
+    try {
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true': acidDb.acidTable",
+          e.getMessage());
+    }
+
+    // Fail - "transactional" property is set to an invalid value
+    try {
+      params.clear();
+      params.put("transactional", "foobar");
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("'transactional' property of TBLPROPERTIES may only have value 'true': acidDb.acidTable",
+          e.getMessage());
+    }
+
+    // Fail - "transactional" is set to true, but the table is not bucketed
+    try {
+      params.clear();
+      params.put("transactional", "true");
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable",
+          e.getMessage());
+    }
+
+    List<String> bucketCols = new ArrayList<>();
+    bucketCols.add("income");
+    // Fail - "transactional" is set to true, and the table is bucketed, but doesn't use ORC
+    try {
+      params.clear();
+      params.put("transactional", "true");
+      Table t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setTableParams(params)
+          .setCols(type.getFields())
+          .setBucketCols(bucketCols)
+          .build();
+      client.createTable(t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable",
+          e.getMessage());
+    }
+
+    // Succeed - "transactional" is set to true, and the table is bucketed, and uses ORC
+    params.clear();
+    params.put("transactional", "true");
+    Table t = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .setTableParams(params)
+        .setCols(type.getFields())
+        .setBucketCols(bucketCols)
+        .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
+        .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
+        .build();
+    client.createTable(t);
+    assertTrue("CREATE TABLE should succeed",
+        "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
+
+    /// ALTER TABLE scenarios
+
+    // Fail - trying to set "transactional" to "false" is not allowed
+    try {
+      params.clear();
+      params.put("transactional", "false");
+      t = new Table();
+      t.setParameters(params);
+      t.setDbName(dbName);
+      t.setTableName(tblName);
+      client.alter_table(dbName, tblName, t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("TBLPROPERTIES with 'transactional'='true' cannot be unset: acidDb.acidTable", e.getMessage());
+    }
+
+    // Fail - trying to set "transactional" to "true" but doesn't satisfy bucketing and Input/OutputFormat requirement
+    try {
+      tblName += "1";
+      params.clear();
+      t = new TableBuilder()
+          .setDbName(dbName)
+          .setTableName(tblName)
+          .setCols(type.getFields())
+          .setInputFormat("org.apache.hadoop.mapred.FileInputFormat")
+          .build();
+      client.createTable(t);
+      params.put("transactional", "true");
+      t.setParameters(params);
+      client.alter_table(dbName, tblName, t);
+      fail("Expected exception");
+    } catch (MetaException e) {
+      assertEquals("The table must be stored using an ACID compliant format (such as ORC): acidDb.acidTable1",
+          e.getMessage());
+    }
+
+    // Succeed - trying to set "transactional" to "true", and satisfies bucketing and Input/OutputFormat requirement
+    tblName += "2";
+    params.clear();
+    t = new TableBuilder()
+        .setDbName(dbName)
+        .setTableName(tblName)
+        .setCols(type.getFields())
+        .setNumBuckets(1)
+        .setBucketCols(bucketCols)
+        .setInputFormat("org.apache.hadoop.hive.ql.io.orc.OrcInputFormat")
+        .setOutputFormat("org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat")
+        .build();
+    client.createTable(t);
+    params.put("transactional", "true");
+    t.setParameters(params);
+    client.alter_table(dbName, tblName, t);
+    assertTrue("ALTER TABLE should succeed",
+        "true".equals(t.getParameters().get(hive_metastoreConstants.TABLE_IS_TRANSACTIONAL)));
+  }
+
+  private static void silentDropDatabase(String dbName) throws TException {
+    try {
+      for (String tableName : client.getTables(dbName, "*")) {
+        client.dropTable(dbName, tableName);
+      }
+      client.dropDatabase(dbName);
+    } catch (NoSuchObjectException|InvalidOperationException e) {
+      // NOP
+    }
+  }
+
+  private Type createType(String typeName, Map<String, String> fields) throws Throwable {
+    Type typ1 = new Type();
+    typ1.setName(typeName);
+    typ1.setFields(new ArrayList<>(fields.size()));
+    for(String fieldName : fields.keySet()) {
+      typ1.getFields().add(
+          new FieldSchema(fieldName, fields.get(fieldName), ""));
+    }
+    client.createType(typ1);
+    return typ1;
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
deleted file mode 100644
index e9dabee..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestAdminUser.java
+++ /dev/null
@@ -1,45 +0,0 @@
-/**
-* Licensed to the Apache Software Foundation (ASF) under one
-* or more contributor license agreements.  See the NOTICE file
-* distributed with this work for additional information
-* regarding copyright ownership.  The ASF licenses this file
-* to you under the Apache License, Version 2.0 (the
-* "License"); you may not use this file except in compliance
-* with the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-package org.apache.hadoop.hive.metastore;
-
-import java.io.IOException;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.HiveMetaStore.HMSHandler;
-import org.apache.hadoop.hive.metastore.api.PrincipalType;
-import org.apache.hadoop.hive.metastore.api.Role;
-import org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory;
-
-public class TestAdminUser extends TestCase{
-
- public void testCreateAdminNAddUser() throws IOException, Throwable {
-   HiveConf conf = new HiveConf();
-   conf.setVar(ConfVars.USERS_IN_ADMIN_ROLE, "adminuser");
-   conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER,SQLStdHiveAuthorizerFactory.class.getName());
-   RawStore rawStore = new HMSHandler("testcreateroot", conf).getMS();
-   Role adminRole = rawStore.getRole(HiveMetaStore.ADMIN);
-   assertTrue(adminRole.getOwnerName().equals(HiveMetaStore.ADMIN));
-   assertEquals(rawStore.listPrincipalGlobalGrants(HiveMetaStore.ADMIN, PrincipalType.ROLE)
-    .get(0).getGrantInfo().getPrivilege(),"All");
-   assertEquals(rawStore.listRoles("adminuser", PrincipalType.USER).get(0).
-     getRoleName(),HiveMetaStore.ADMIN);
- }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
deleted file mode 100644
index 462768d..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestEmbeddedHiveMetaStore.java
+++ /dev/null
@@ -1,54 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.metastore;
-
-import org.apache.hadoop.util.StringUtils;
-
-public class TestEmbeddedHiveMetaStore extends TestHiveMetaStore {
-
-  @Override
-  protected void setUp() throws Exception {
-    super.setUp();
-    warehouse = new Warehouse(hiveConf);
-    client = createClient();
-  }
-
-  @Override
-  protected void tearDown() throws Exception {
-    try {
-      super.tearDown();
-      client.close();
-    } catch (Throwable e) {
-      System.err.println("Unable to close metastore");
-      System.err.println(StringUtils.stringifyException(e));
-      throw new Exception(e);
-    }
-  }
-
-  @Override
-  protected HiveMetaStoreClient createClient() throws Exception {
-    try {
-      return new HiveMetaStoreClient(hiveConf);
-    } catch (Throwable e) {
-      System.err.println("Unable to open the metastore");
-      System.err.println(StringUtils.stringifyException(e));
-      throw new Exception(e);
-    }
-  }
-}

http://git-wip-us.apache.org/repos/asf/hive/blob/d9801d9c/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
----------------------------------------------------------------------
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
deleted file mode 100644
index 2382582..0000000
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/TestFilterHooks.java
+++ /dev/null
@@ -1,280 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hive.metastore;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.fail;
-
-import java.util.ArrayList;
-import java.util.List;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.UtilsForTest;
-import org.apache.hadoop.hive.cli.CliSessionState;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
-import org.apache.hadoop.hive.metastore.api.Database;
-import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.metastore.api.Partition;
-import org.apache.hadoop.hive.metastore.api.PartitionSpec;
-import org.apache.hadoop.hive.metastore.api.Table;
-import org.apache.hadoop.hive.ql.DriverFactory;
-import org.apache.hadoop.hive.ql.IDriver;
-import org.apache.hadoop.hive.ql.session.SessionState;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Lists;
-
-public class TestFilterHooks {
-  private static final Logger LOG = LoggerFactory.getLogger(TestFilterHooks.class);
-
-  public static class DummyMetaStoreFilterHookImpl extends DefaultMetaStoreFilterHookImpl {
-    public static boolean blockResults = false;
-
-    public DummyMetaStoreFilterHookImpl(Configuration conf) {
-      super(conf);
-    }
-
-    @Override
-    public List<String> filterDatabases(List<String> dbList) throws MetaException  {
-      if (blockResults) {
-        return new ArrayList<String>();
-      }
-      return super.filterDatabases(dbList);
-    }
-
-    @Override
-    public Database filterDatabase(Database dataBase) throws NoSuchObjectException {
-      if (blockResults) {
-        throw new NoSuchObjectException("Blocked access");
-      }
-      return super.filterDatabase(dataBase);
-    }
-
-    @Override
-    public List<String> filterTableNames(String dbName, List<String> tableList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<String>();
-      }
-      return super.filterTableNames(dbName, tableList);
-    }
-
-    @Override
-    public Table filterTable(Table table) throws NoSuchObjectException {
-      if (blockResults) {
-        throw new NoSuchObjectException("Blocked access");
-      }
-      return super.filterTable(table);
-    }
-
-    @Override
-    public List<Table> filterTables(List<Table> tableList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<Table>();
-      }
-      return super.filterTables(tableList);
-    }
-
-    @Override
-    public List<Partition> filterPartitions(List<Partition> partitionList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<Partition>();
-      }
-      return super.filterPartitions(partitionList);
-    }
-
-    @Override
-    public List<PartitionSpec> filterPartitionSpecs(
-        List<PartitionSpec> partitionSpecList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<PartitionSpec>();
-      }
-      return super.filterPartitionSpecs(partitionSpecList);
-    }
-
-    @Override
-    public Partition filterPartition(Partition partition) throws NoSuchObjectException {
-      if (blockResults) {
-        throw new NoSuchObjectException("Blocked access");
-      }
-      return super.filterPartition(partition);
-    }
-
-    @Override
-    public List<String> filterPartitionNames(String dbName, String tblName,
-        List<String> partitionNames) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<String>();
-      }
-      return super.filterPartitionNames(dbName, tblName, partitionNames);
-    }
-
-    @Override
-    public Index filterIndex(Index index) throws NoSuchObjectException {
-      if (blockResults) {
-        throw new NoSuchObjectException("Blocked access");
-      }
-      return super.filterIndex(index);
-    }
-
-    @Override
-    public List<String> filterIndexNames(String dbName, String tblName,
-        List<String> indexList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<String>();
-      }
-      return super.filterIndexNames(dbName, tblName, indexList);
-    }
-
-    @Override
-    public List<Index> filterIndexes(List<Index> indexeList) throws MetaException {
-      if (blockResults) {
-        return new ArrayList<Index>();
-      }
-      return super.filterIndexes(indexeList);
-    }
-  }
-
-  private static final String DBNAME1 = "testdb1";
-  private static final String DBNAME2 = "testdb2";
-  private static final String TAB1 = "tab1";
-  private static final String TAB2 = "tab2";
-  private static final String INDEX1 = "idx1";
-  private static HiveConf hiveConf;
-  private static HiveMetaStoreClient msc;
-  private static IDriver driver;
-
-  @BeforeClass
-  public static void setUp() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = false;
-
-    hiveConf = new HiveConf(TestFilterHooks.class);
-    hiveConf.setIntVar(HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, 3);
-    hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, "");
-    hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false");
-    hiveConf.setVar(ConfVars.METASTORE_FILTER_HOOK, DummyMetaStoreFilterHookImpl.class.getName());
-    UtilsForTest.setNewDerbyDbLocation(hiveConf, TestFilterHooks.class.getSimpleName());
-    int port = MetaStoreTestUtils.startMetaStoreWithRetry(hiveConf);
-    hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:" + port);
-
-    SessionState.start(new CliSessionState(hiveConf));
-    msc = new HiveMetaStoreClient(hiveConf);
-    driver = DriverFactory.newDriver(hiveConf);
-
-    driver.run("drop database if exists " + DBNAME1  + " cascade");
-    driver.run("drop database if exists " + DBNAME2  + " cascade");
-    driver.run("create database " + DBNAME1);
-    driver.run("create database " + DBNAME2);
-    driver.run("use " + DBNAME1);
-    driver.run("create table " + DBNAME1 + "." + TAB1 + " (id int, name string)");
-    driver.run("create table " + TAB2 + " (id int) partitioned by (name string)");
-    driver.run("ALTER TABLE " + TAB2 + " ADD PARTITION (name='value1')");
-    driver.run("ALTER TABLE " + TAB2 + " ADD PARTITION (name='value2')");
-    driver.run("CREATE INDEX " + INDEX1 + " on table " + TAB1 + "(id) AS 'COMPACT' WITH DEFERRED REBUILD");
-  }
-
-  @AfterClass
-  public static void tearDown() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = false;
-    driver.run("drop database if exists " + DBNAME1  + " cascade");
-    driver.run("drop database if exists " + DBNAME2  + " cascade");
-    driver.close();
-    driver.destroy();
-    msc.close();
-  }
-
-  @Test
-  public void testDefaultFilter() throws Exception {
-    assertNotNull(msc.getTable(DBNAME1, TAB1));
-    assertEquals(3, msc.getTables(DBNAME1, "*").size());
-    assertEquals(3, msc.getAllTables(DBNAME1).size());
-    assertEquals(1, msc.getTables(DBNAME1, TAB2).size());
-    assertEquals(0, msc.getAllTables(DBNAME2).size());
-
-    assertNotNull(msc.getDatabase(DBNAME1));
-    assertEquals(3, msc.getDatabases("*").size());
-    assertEquals(3, msc.getAllDatabases().size());
-    assertEquals(1, msc.getDatabases(DBNAME1).size());
-
-    assertNotNull(msc.getPartition(DBNAME1, TAB2, "name=value1"));
-    assertEquals(1, msc.getPartitionsByNames(DBNAME1, TAB2, Lists.newArrayList("name=value1")).size());
-
-    assertNotNull(msc.getIndex(DBNAME1, TAB1, INDEX1));
-  }
-
-  @Test
-  public void testDummyFilterForTables() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = true;
-    try {
-      msc.getTable(DBNAME1, TAB1);
-      fail("getTable() should fail with blocking mode");
-    } catch (NoSuchObjectException e) {
-      // Expected
-    }
-    assertEquals(0, msc.getTables(DBNAME1, "*").size());
-    assertEquals(0, msc.getAllTables(DBNAME1).size());
-    assertEquals(0, msc.getTables(DBNAME1, TAB2).size());
-  }
-
-  @Test
-  public void testDummyFilterForDb() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = true;
-    try {
-      assertNotNull(msc.getDatabase(DBNAME1));
-      fail("getDatabase() should fail with blocking mode");
-    } catch (NoSuchObjectException e) {
-        // Expected
-    }
-    assertEquals(0, msc.getDatabases("*").size());
-    assertEquals(0, msc.getAllDatabases().size());
-    assertEquals(0, msc.getDatabases(DBNAME1).size());
-  }
-
-  @Test
-  public void testDummyFilterForPartition() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = true;
-    try {
-      assertNotNull(msc.getPartition(DBNAME1, TAB2, "name=value1"));
-      fail("getPartition() should fail with blocking mode");
-    } catch (NoSuchObjectException e) {
-      // Expected
-    }
-    assertEquals(0, msc.getPartitionsByNames(DBNAME1, TAB2,
-        Lists.newArrayList("name=value1")).size());
-  }
-
-  @Test
-  public void testDummyFilterForIndex() throws Exception {
-    DummyMetaStoreFilterHookImpl.blockResults = true;
-    try {
-      assertNotNull(msc.getIndex(DBNAME1, TAB1, INDEX1));
-      fail("getIndex() should fail with blocking mode");
-    } catch (NoSuchObjectException e) {
-      // Expected
-    }
-  }
-
-}
