Repository: sentry
Updated Branches:
  refs/heads/sentry-ha-redesign e7ec257a7 -> 1b65698cd
SENTRY-1324: Add sentry specific test cases to use NotificationLog (Sravya Tirukkovalur, Reviewed by: Hao Hao)

Change-Id: Ifd1ea9a6101465be741ac1579356150b643e58bf


Project: http://git-wip-us.apache.org/repos/asf/sentry/repo
Commit: http://git-wip-us.apache.org/repos/asf/sentry/commit/1b65698c
Tree: http://git-wip-us.apache.org/repos/asf/sentry/tree/1b65698c
Diff: http://git-wip-us.apache.org/repos/asf/sentry/diff/1b65698c

Branch: refs/heads/sentry-ha-redesign
Commit: 1b65698cdf8bac61785c820a1df1d1b45a6f2354
Parents: e7ec257
Author: Sravya Tirukkovalur <[email protected]>
Authored: Fri Jun 10 16:17:26 2016 -0700
Committer: Sravya Tirukkovalur <[email protected]>
Committed: Tue Jul 5 16:30:05 2016 -0700

----------------------------------------------------------------------
 sentry-tests/sentry-tests-hive/pom.xml          |   6 +
 .../AbstractTestWithStaticConfiguration.java    |  21 +-
 ...actMetastoreTestWithStaticConfiguration.java |  34 +-
 ...ificationLogUsingDBNotificationListener.java | 351 +++++++++++++++++++
 4 files changed, 403 insertions(+), 9 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/sentry/blob/1b65698c/sentry-tests/sentry-tests-hive/pom.xml
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/pom.xml b/sentry-tests/sentry-tests-hive/pom.xml
index 02bfa49..49bed3f 100644
--- a/sentry-tests/sentry-tests-hive/pom.xml
+++ b/sentry-tests/sentry-tests-hive/pom.xml
@@ -115,6 +115,12 @@ limitations under the License.
     </dependency>
     <dependency>
       <groupId>org.apache.hive.hcatalog</groupId>
+      <artifactId>hive-hcatalog-server-extensions</artifactId>
+      <version>${hive.version}</version>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hive.hcatalog</groupId>
       <artifactId>hive-hcatalog-core</artifactId>
       <scope>test</scope>
     </dependency>

http://git-wip-us.apache.org/repos/asf/sentry/blob/1b65698c/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
index 2add2d0..2c4948e 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java
@@ -16,10 +16,6 @@
  */
 package org.apache.sentry.tests.e2e.hive;
 
-import static org.apache.sentry.core.common.utils.SentryConstants.AUTHORIZABLE_SPLITTER;
-import static org.apache.sentry.core.common.utils.SentryConstants.PRIVILEGE_PREFIX;
-import static org.apache.sentry.core.common.utils.SentryConstants.ROLE_SPLITTER;
-
 import java.io.File;
 import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
@@ -69,6 +65,7 @@ import org.apache.sentry.service.thrift.KerberosConfiguration;
 import org.apache.sentry.service.thrift.SentryServiceClientFactory;
 import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig;
 import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
+
 import org.apache.sentry.tests.e2e.hive.fs.DFS;
 import org.apache.sentry.tests.e2e.hive.fs.DFSFactory;
 import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServer;
@@ -88,6 +85,10 @@ import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.login.LoginContext;
 
+import static org.apache.sentry.core.common.utils.SentryConstants.AUTHORIZABLE_SPLITTER;
+import static org.apache.sentry.core.common.utils.SentryConstants.PRIVILEGE_PREFIX;
+import static org.apache.sentry.core.common.utils.SentryConstants.ROLE_SPLITTER;
+
 public abstract class AbstractTestWithStaticConfiguration {
   private static final Logger LOGGER = LoggerFactory
       .getLogger(AbstractTestWithStaticConfiguration.class);
@@ -159,6 +160,7 @@ public abstract class AbstractTestWithStaticConfiguration {
   protected static boolean policyOnHdfs = false;
   protected static boolean useSentryService = false;
   protected static boolean setMetastoreListener = true;
+  protected static boolean useDbNotificationListener = false;
  protected static String testServerType = null;
   protected static boolean enableHiveConcurrency = false;
   // indicate if the database need to be clear for every test case in one test class
@@ -507,10 +509,15 @@ public abstract class AbstractTestWithStaticConfiguration {
       startSentryService();
       if (setMetastoreListener) {
         LOGGER.info("setMetastoreListener is enabled");
-        properties.put(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
-            SentryMetastorePostEventListener.class.getName());
-      }
+        if (useDbNotificationListener) {
+          properties.put(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
+              "org.apache.hive.hcatalog.listener.DbNotificationListener");
+        } else {
+          properties.put(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS.varname,
+              SentryMetastorePostEventListener.class.getName());
+        }
+      }
   }
 
   private static void startSentryService() throws Exception {

http://git-wip-us.apache.org/repos/asf/sentry/blob/1b65698c/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
index f1e6d75..b72e317 100644
--- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/AbstractMetastoreTestWithStaticConfiguration.java
@@ -92,6 +92,26 @@ public abstract class AbstractMetastoreTestWithStaticConfiguration extends
   }
 
+  public void alterTableWithLocation(HiveMetaStoreClient client,
+      Table table, String location)
+      throws Exception {
+    table.getSd().setLocation(location);
+    client.alter_table(table.getDbName(), table.getTableName(), table);
+  }
+
+  public void alterTableRename(HiveMetaStoreClient client,
+      Table table, String newDBName, String newTableName, String newLocation)
+      throws Exception {
+    String dbName = table.getDbName();
+    String tableName = table.getTableName();
+    table.setDbName(newDBName);
+    table.setTableName(newTableName);
+    if( newLocation != null ) {
+      table.getSd().setLocation(newLocation);
+    }
+    client.alter_table(dbName, tableName, table);
+  }
+
   public Table createMetastoreTableWithPartition(HiveMetaStoreClient client,
       String dbName, String tabName, List<FieldSchema> cols,
      List<FieldSchema> partionVals) throws Exception {
@@ -101,10 +121,20 @@ public abstract class AbstractMetastoreTestWithStaticConfiguration extends
     return client.getTable(dbName, tabName);
   }
 
-  public void addPartition(HiveMetaStoreClient client, String dbName,
+  public Partition addPartition(HiveMetaStoreClient client, String dbName,
       String tblName, List<String> ptnVals, Table tbl) throws Exception {
     Partition part = makeMetastorePartitionObject(dbName, tblName, ptnVals, tbl);
-    client.add_partition(part);
+    return client.add_partition(part);
+  }
+
+  public void alterPartitionWithLocation(HiveMetaStoreClient client, Partition partition, String location) throws Exception {
+    partition.getSd().setLocation(location);
+    client.alter_partition(partition.getDbName(), partition.getTableName(), partition);
+  }
+
+  public void dropPartition(HiveMetaStoreClient client, String dbName,
+      String tblName, List<String> ptnVals) throws Exception {
+    client.dropPartition(dbName, tblName, ptnVals);
   }
 
   public void addPartitionWithLocation(HiveMetaStoreClient client,

http://git-wip-us.apache.org/repos/asf/sentry/blob/1b65698c/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestHMSNotificationLogUsingDBNotificationListener.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestHMSNotificationLogUsingDBNotificationListener.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestHMSNotificationLogUsingDBNotificationListener.java
new file mode 100644
index 0000000..0b328d4
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/metastore/TestHMSNotificationLogUsingDBNotificationListener.java
@@ -0,0 +1,351 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.tests.e2e.metastore;
+
+import com.google.common.collect.Lists;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.*;
+import org.apache.hive.hcatalog.messaging.CreateDatabaseMessage;
+import org.apache.hive.hcatalog.messaging.HCatEventMessage;
+import org.apache.hive.hcatalog.messaging.MessageDeserializer;
+import org.apache.hive.hcatalog.messaging.MessageFactory;
+import org.apache.hive.hcatalog.messaging.CreateTableMessage;
+import org.apache.hive.hcatalog.messaging.DropTableMessage;
+import org.apache.hive.hcatalog.messaging.AlterTableMessage;
+import org.apache.hive.hcatalog.messaging.AlterPartitionMessage;
+import org.apache.hive.hcatalog.messaging.DropDatabaseMessage;
+import org.apache.hive.hcatalog.messaging.AddPartitionMessage;
+import org.apache.hive.hcatalog.messaging.DropPartitionMessage;
+import org.apache.sentry.provider.file.PolicyFile;
+import org.apache.sentry.tests.e2e.hive.StaticUserGroup;
+import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory;
+import org.hamcrest.text.IsEqualIgnoringCase;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertThat;
+
+import org.junit.*;
+
+import java.io.File;
+import java.util.ArrayList;
+import java.util.Random;
+
+/**
+ * Make sure NotificationLog is capturing the information correctly for the commands which change <Obj,Location> mapping
+ */
+public class TestHMSNotificationLogUsingDBNotificationListener extends AbstractMetastoreTestWithStaticConfiguration {
+
+  private PolicyFile policyFile;
+
+  private static HiveMetaStoreClient client;
+  private static MessageDeserializer deserializer;
+  private static Random random = new Random();
+
+
+  @BeforeClass
+  public static void setupTestStaticConfiguration() throws Exception {
+    setMetastoreListener = true;
+    useDbNotificationListener = true;
+    AbstractMetastoreTestWithStaticConfiguration.setupTestStaticConfiguration();
+    client = context.getMetaStoreClient(ADMIN1);
+    deserializer = MessageFactory.getDeserializer("json", "");
+  }
+
+  @AfterClass
+  public static void cleanupAfterClass() throws Exception {
+    if (client != null) {
+      client.close();
+    }
+  }
+
+  @Override
+  @Before
+  public void setup() throws Exception {
+    policyFile = setAdminOnServer1(ADMINGROUP);
+    policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping());
+    writePolicyFile(policyFile);
+    super.setup();
+  }
+
+  @Test
+  public void testCreateDropDatabase() throws Exception {
+    CurrentNotificationEventId latestID, previousID;
+    NotificationEventResponse response;
+
+    String testDB = "N_db" + random.nextInt(Integer.SIZE - 1);
+
+    // Create database
+    // We need:
+    // - Dbname
+    // - location
+    createMetastoreDB(client, testDB);
+    latestID = client.getCurrentNotificationEventId();
+    response = client.getNextNotification(latestID.getEventId() - 1, 1, null);
+    CreateDatabaseMessage createDatabaseMessage = deserializer.getCreateDatabaseMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.CREATE_DATABASE, createDatabaseMessage.getEventType()); //Validate EventType
+    assertEquals(testDB, createDatabaseMessage.getDB()); //dbName
+    //Location information is not available
+
+    //Alter database location and rename are not supported. See HIVE-4847
+
+    //Drop database
+    // We need:
+    // - dbName
+    // - location
+    client.dropDatabase(testDB);
+    previousID = latestID;
+    latestID = client.getCurrentNotificationEventId();
+    assertEquals(previousID.getEventId() + 1, latestID.getEventId()); //Validate monotonically increasing eventID
+    response = client.getNextNotification(latestID.getEventId() - 1, 1, null);
+    DropDatabaseMessage dropDatabaseMessage = deserializer.getDropDatabaseMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.DROP_DATABASE, dropDatabaseMessage.getEventType()); //Event type
+    assertThat(dropDatabaseMessage.getDB(), IsEqualIgnoringCase.equalToIgnoringCase(testDB)); // dbName
+    //Location information is not available, but we might not really need it as we can drop all paths associated with
+    //the object when we drop
+  }
+
+  @Test
+  public void testCreateDropTableWithPartition() throws Exception {
+    String testDB = "N_db" + random.nextInt(Integer.SIZE - 1);
+    String testTable = "N_table" + random.nextInt(Integer.SIZE - 1);
+
+    NotificationEventResponse response;
+    CurrentNotificationEventId latestID, previousID;
+    // Create database
+    createMetastoreDB(client, testDB);
+
+    // Create table with partition
+    // We need:
+    // - dbname
+    // - tablename
+    // - location
+    createMetastoreTableWithPartition(client, testDB,
+        testTable, Lists.newArrayList(new FieldSchema("col1", "int", "")),
+        Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
+    latestID = client.getCurrentNotificationEventId();
+    response = client.getNextNotification(latestID.getEventId() - 1, 1, null);
+    CreateTableMessage createTableMessage = deserializer.getCreateTableMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.CREATE_TABLE, createTableMessage.getEventType());
+    assertEquals(testDB, createTableMessage.getDB()); //dbName
+    assertEquals(testTable, createTableMessage.getTable()); //tableName
+    //Location information is not available
+
+    //Drop table
+    // We need:
+    // - dbName
+    // - tableName
+    // - location
+    client.dropTable(testDB, testTable);
+    previousID = latestID;
+    latestID = client.getCurrentNotificationEventId();
+    assertEquals(previousID.getEventId() + 1, latestID.getEventId());
+    response = client.getNextNotification(latestID.getEventId() - 1, 1, null);
+    DropTableMessage dropTableMessage = deserializer.getDropTableMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.DROP_TABLE, dropTableMessage.getEventType());
+    assertThat(dropTableMessage.getDB(), IsEqualIgnoringCase.equalToIgnoringCase(testDB));//dbName
+    assertThat(dropTableMessage.getTable(), IsEqualIgnoringCase.equalToIgnoringCase(testTable));//tableName
+    //Location information is not available, but we might not really need it as we can drop all paths associated with
+    //the object when we drop
+  }
+
+  @Test
+  public void testCreateDropTableWithoutPartition() throws Exception {
+    String testDB = "N_db" + random.nextInt(Integer.SIZE - 1);
+    String testTable = "N_table" + random.nextInt(Integer.SIZE - 1);
+
+    NotificationEventResponse response;
+    CurrentNotificationEventId latestID, previousID;
+    // Create database
+    createMetastoreDB(client, testDB);
+
+    // Create table with partition
+    // We need:
+    // - dbname
+    // - tablename
+    // - location
+    createMetastoreTable(client, testDB, testTable, Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    latestID = client.getCurrentNotificationEventId();
+    response = client.getNextNotification(latestID.getEventId() - 1, 1, null);
+    CreateTableMessage createTableMessage = deserializer.getCreateTableMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.CREATE_TABLE, createTableMessage.getEventType());
+    assertEquals(testDB, createTableMessage.getDB()); //dbName
+    assertEquals(testTable, createTableMessage.getTable()); //tableName
+    //Location information is not available
+
+    //Drop table
+    // We need:
+    // - dbName
+    // - tableName
+    // - location
+    client.dropTable(testDB, testTable);
+    previousID = latestID;
+    latestID = client.getCurrentNotificationEventId();
+    assertEquals(previousID.getEventId() + 1, latestID.getEventId());
+    response = client.getNextNotification(latestID.getEventId() - 1, 1, null);
+    DropTableMessage dropTableMessage = deserializer.getDropTableMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.DROP_TABLE, dropTableMessage.getEventType());
+    assertThat(dropTableMessage.getDB(), IsEqualIgnoringCase.equalToIgnoringCase(testDB));//dbName
+    assertThat(dropTableMessage.getTable(), IsEqualIgnoringCase.equalToIgnoringCase(testTable));//tableName
+    //Location information is not available, but we might not really need it as we can drop all paths associated with
+    //the object when we drop
+  }
+
+  @Test
+  public void testAddDropPartition() throws Exception {
+    String testDB = "N_db" + random.nextInt(Integer.SIZE - 1);
+    String testTable = "N_table" + random.nextInt(Integer.SIZE - 1);
+
+    NotificationEventResponse response;
+    CurrentNotificationEventId latestID, previousID;
+    // Create database and table
+    createMetastoreDB(client, testDB);
+    Table tbl1 = createMetastoreTableWithPartition(client, testDB, testTable, Lists.newArrayList(new FieldSchema("col1", "int", "")),
+        Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
+
+    ArrayList<String> partVals1 = Lists.newArrayList("part1");
+
+    //Add partition
+    // We need:
+    // - dbName
+    // - tableName
+    // - partition location
+    addPartition(client, testDB, testTable, partVals1, tbl1);
+    latestID = client.getCurrentNotificationEventId();
+    response = client.getNextNotification(latestID.getEventId() - 1, 1, null);
+    AddPartitionMessage addPartitionMessage = deserializer.getAddPartitionMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.ADD_PARTITION, addPartitionMessage.getEventType());
+    assertThat(addPartitionMessage.getDB(), IsEqualIgnoringCase.equalToIgnoringCase(testDB));// dbName (returns lowered version)
+    assertThat(addPartitionMessage.getTable(), IsEqualIgnoringCase.equalToIgnoringCase(testTable));// tableName (returns lowered version)
+    //Location information is not available
+
+    //Drop partition
+    // We need:
+    // - dbName
+    // - tableName
+    // - partition location
+    dropPartition(client, testDB, testTable, partVals1);
+    previousID = latestID;
+    latestID = client.getCurrentNotificationEventId();
+    assertEquals(previousID.getEventId() + 1, latestID.getEventId());
+    response = client.getNextNotification(latestID.getEventId() - 1, 1, null);
+    DropPartitionMessage dropPartitionMessage = deserializer.getDropPartitionMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.DROP_PARTITION, dropPartitionMessage.getEventType());
+    assertThat(dropPartitionMessage.getDB(), IsEqualIgnoringCase.equalToIgnoringCase(testDB)); //dbName
+    assertThat(dropPartitionMessage.getTable(), IsEqualIgnoringCase.equalToIgnoringCase(testTable)); //tableName
+    //Location information is not available
+
+  }
+
+  @Ignore("Needs Hive >= 1.1.2")
+  @Test
+  public void testAlterTableWithPartition() throws Exception {
+    String testDB = "N_db" + random.nextInt(Integer.SIZE - 1);
+    String testTable = "N_table" + random.nextInt(Integer.SIZE - 1);
+
+    NotificationEventResponse response;
+    CurrentNotificationEventId latestID, previousID;
+    // Create database
+    createMetastoreDB(client, testDB);
+
+    // Create table with partition
+    Table tbl1 = createMetastoreTableWithPartition(client, testDB,
+        testTable, Lists.newArrayList(new FieldSchema("col1", "int", "")),
+        Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
+
+    //Alter table location
+    // We need:
+    // - dbName
+    // - tableName
+    // - old location
+    // - new location
+    String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + random.nextInt(Integer.SIZE - 1);
+    alterTableWithLocation(client, tbl1, tabDir1);
+    latestID = client.getCurrentNotificationEventId();
+    response = client.getNextNotification(latestID.getEventId()-1, 1, null);
+    AlterTableMessage alterTableMessage = deserializer.getAlterTableMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.ALTER_TABLE, alterTableMessage.getEventType());
+    assertThat(alterTableMessage.getDB(), IsEqualIgnoringCase.equalToIgnoringCase(testDB));//dbName
+    assertThat(alterTableMessage.getTable(), IsEqualIgnoringCase.equalToIgnoringCase(testTable));//tableName
+    //Old location is not available: This information is lost if not captured at the time of event.
+    //New location is not available
+
+    //Alter table rename managed table - location also changes
+    // We need:
+    // - oldDbName
+    // - newDbName
+    // - oldTableName
+    // - newTableName
+    // - old location
+    // - new location
+    String newDBName = testDB + random.nextInt(Integer.SIZE - 1);
+    String newTableName = testTable + random.nextInt(Integer.SIZE - 1);
+    String newLocation = tabDir1 + random.nextInt(Integer.SIZE - 1);
+    createMetastoreDB(client, newDBName);
+    alterTableRename(client, tbl1, newDBName, newTableName, newLocation);
+    previousID = latestID;
+    latestID = client.getCurrentNotificationEventId();
+    assertEquals(previousID.getEventId() + 1, latestID.getEventId());
+    response = client.getNextNotification(latestID.getEventId()-1, 1, null);
+    alterTableMessage = deserializer.getAlterTableMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.ALTER_TABLE, alterTableMessage.getEventType());
+    assertThat(alterTableMessage.getDB(), IsEqualIgnoringCase.equalToIgnoringCase(testDB));//oldDbName
+    assertThat(alterTableMessage.getTable(), IsEqualIgnoringCase.equalToIgnoringCase(testTable));//oldTableName
+    assertThat(response.getEvents().get(0).getDbName(), IsEqualIgnoringCase.equalToIgnoringCase(newDBName));//newDbName
+    assertThat(response.getEvents().get(0).getTableName(), IsEqualIgnoringCase.equalToIgnoringCase(newTableName));//newTableName
+    //Old location: This information is lost if not captured at the time of event.
+    //New location: Not sure how can we get this? Refresh all paths for every alter table add partition?
+  }
+
+  @Ignore("Needs Hive >= 1.1.2")
+  @Test
+  public void testAlterPartition() throws Exception {
+    String testDB = "N_db" + random.nextInt(Integer.SIZE - 1);
+    String testTable = "N_table" + random.nextInt(Integer.SIZE - 1);
+
+    NotificationEventResponse response;
+    CurrentNotificationEventId latestID;
+    // Create database
+    createMetastoreDB(client, testDB);
+
+    // Create table with partition
+    Table tbl1 = createMetastoreTableWithPartition(client, testDB,
+        testTable, Lists.newArrayList(new FieldSchema("col1", "int", "")),
+        Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
+    ArrayList<String> partVals1 = Lists.newArrayList("part1");
+    Partition partition = addPartition(client, testDB, testTable, partVals1, tbl1);
+
+
+    String warehouseDir = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR);
+    //Alter partition with location
+    // We need:
+    // - dbName
+    // - tableName
+    // - partition location
+    alterPartitionWithLocation(client, partition, warehouseDir + File.separator + "newpart");
+    latestID = client.getCurrentNotificationEventId();
+    response = client.getNextNotification(latestID.getEventId()-1, 1, null);
+    AlterPartitionMessage alterPartitionMessage = deserializer.getAlterPartitionMessage(response.getEvents().get(0).getMessage());
+    assertEquals(HCatEventMessage.EventType.ALTER_PARTITION, alterPartitionMessage.getEventType());
+    assertThat(alterPartitionMessage.getDB(), IsEqualIgnoringCase.equalToIgnoringCase(testDB));// dbName
+    assertThat(alterPartitionMessage.getTable(), IsEqualIgnoringCase.equalToIgnoringCase(testTable));// tableName
+    //Location information, not sure how can we get this? Refresh all paths for every alter table add partition?
+  }
+}
+
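
----------------------------------------------------------------------
Editor's note (illustrative, not part of the commit): every test above follows the same
pattern against the HMS NotificationLog -- perform a metastore operation, read the newest
event via getCurrentNotificationEventId() / getNextNotification(), and decode its JSON
payload with the HCatalog MessageDeserializer. The standalone sketch below shows that
pattern outside the test harness. The class name NotificationLogPollerSketch and the
thrift://localhost:9083 metastore URI are placeholder assumptions, and only two event
types are handled for brevity.

// Sketch only: poll the HMS notification log and decode a couple of event types,
// mirroring the calls used in TestHMSNotificationLogUsingDBNotificationListener.
import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.NotificationEvent;
import org.apache.hadoop.hive.metastore.api.NotificationEventResponse;
import org.apache.hive.hcatalog.messaging.HCatEventMessage;
import org.apache.hive.hcatalog.messaging.MessageDeserializer;
import org.apache.hive.hcatalog.messaging.MessageFactory;

public class NotificationLogPollerSketch {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    // Placeholder endpoint; point this at a metastore configured with DbNotificationListener.
    conf.setVar(HiveConf.ConfVars.METASTOREURIS, "thrift://localhost:9083");
    HiveMetaStoreClient client = new HiveMetaStoreClient(conf);
    MessageDeserializer deserializer = MessageFactory.getDeserializer("json", "");

    // Start from the current head of the log; a real consumer would persist
    // the last processed event id instead of recomputing it on every run.
    long lastSeen = client.getCurrentNotificationEventId().getEventId();

    // ... metastore operations happen elsewhere (other clients, Hive queries, etc.) ...

    // Fetch up to 100 events newer than lastSeen; a null filter returns all event types.
    NotificationEventResponse response = client.getNextNotification(lastSeen, 100, null);
    if (response.getEvents() != null) {
      for (NotificationEvent event : response.getEvents()) {
        String type = event.getEventType();
        HCatEventMessage message;
        if ("CREATE_TABLE".equals(type)) {
          message = deserializer.getCreateTableMessage(event.getMessage());
        } else if ("DROP_TABLE".equals(type)) {
          message = deserializer.getDropTableMessage(event.getMessage());
        } else {
          continue; // other event types elided in this sketch
        }
        System.out.println(event.getEventId() + " " + message.getEventType() + " " + message.getDB());
        lastSeen = event.getEventId(); // would be persisted by a real consumer
      }
    }
    client.close();
  }
}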
