Repository: sentry
Updated Branches:
  refs/heads/master 30f680774 -> bfb354f2b


http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java b/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
new file mode 100644
index 0000000..a9ff95d
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/metastore/TestMetastoreEndToEnd.java
@@ -0,0 +1,628 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.sentry.tests.e2e.metastore;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
+import java.io.File;
+import java.io.FileOutputStream;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Map;
+
+import org.junit.Assert;
+
+import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.metastore.api.Partition;
+import org.apache.hadoop.hive.metastore.api.Table;
+import org.apache.sentry.provider.file.PolicyFile;
+import org.apache.sentry.tests.e2e.hive.Context;
+import org.apache.sentry.tests.e2e.hive.StaticUserGroup;
+import org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import com.google.common.io.Resources;
+
+public class TestMetastoreEndToEnd extends
+    AbstractMetastoreTestWithStaticConfiguration {
+
+  private PolicyFile policyFile;
+  private File dataFile;
+  private static final String dbName = "db_1";
+  private static final String db_all_role = "all_db1";
+  private static final String uri_role = "uri_role";
+  private static final String tab1_all_role = "tab1_all_role";
+  private static final String tab1_read_role = "tab1_read_role";
+  private static final String tab2_all_role = "tab2_all_role";
+  private static final String tab2_read_role = "tab2_read_role";
+  private static final String tabName1 = "tab1";
+  private static final String tabName2 = "tab2";
+  private static final String tabName3 = "tab3";
+
+  @BeforeClass
+  public static void setupTestStaticConfiguration() throws Exception {
+    setMetastoreListener = false;
+    AbstractMetastoreTestWithStaticConfiguration.setupTestStaticConfiguration();
+  }
+
+  @Override
+  @Before
+  public void setup() throws Exception {
+    policyFile = setAdminOnServer1(ADMINGROUP);
+    policyFile.setUserGroupMapping(StaticUserGroup.getStaticMapping());
+    writePolicyFile(policyFile);
+    super.setup();
+
+    dataFile = new File(dataDir, SINGLE_TYPE_DATA_FILE_NAME);
+    FileOutputStream to = new FileOutputStream(dataFile);
+    Resources.copy(Resources.getResource(SINGLE_TYPE_DATA_FILE_NAME), to);
+    to.close();
+
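+    // The metastore may still be starting up when setup() runs; retry the
+    // client connection for up to a minute (10 attempts, 6 seconds apart).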
+    HiveMetaStoreClient client = null;
+    for (int i=0; i < 10; i++) {
+      try {
+        client = context.getMetaStoreClient(ADMIN1);
+        break;
+      } catch (Throwable e) {
+        // ignore
+      }
+      Thread.sleep(6000);
+    }
+    client.dropDatabase(dbName, true, true, true);
+    createMetastoreDB(client, dbName);
+    client.close();
+
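+    // Privilege layout used by the tests below:
+    //   group1 -> ALL on db_1
+    //   group2 -> SELECT on db_1, plus ALL on tab1 and tab2
+    //   group3 -> SELECT on tab1 and tab2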
+    policyFile
+            .addRolesToGroup(USERGROUP1, db_all_role)
+            .addRolesToGroup(USERGROUP2, "read_db_role")
+            .addRolesToGroup(USERGROUP2, tab1_all_role)
+            .addRolesToGroup(USERGROUP2, tab2_all_role)
+            .addRolesToGroup(USERGROUP3, tab1_read_role)
+            .addRolesToGroup(USERGROUP3, tab2_read_role)
+            .addPermissionsToRole(db_all_role, "server=server1->db=" + dbName)
+            .addPermissionsToRole("read_db_role",
+                    "server=server1->db=" + dbName + "->action=SELECT")
+            .addPermissionsToRole(tab1_all_role,
+                    "server=server1->db=" + dbName + "->table=" + tabName1)
+            .addPermissionsToRole(tab2_all_role,
+                    "server=server1->db=" + dbName + "->table=" + tabName2)
+            .addPermissionsToRole(tab1_read_role,
+                    "server=server1->db=" + dbName + "->table=" + tabName1 + "->action=SELECT")
+            .addPermissionsToRole(tab2_read_role,
+                    "server=server1->db=" + dbName + "->table=" + tabName2 + "->action=SELECT")
+            .setUserGroupMapping(StaticUserGroup.getStaticMapping());
+    writePolicyFile(policyFile);
+  }
+
+  @After
+  public void tearDown() throws Exception {
+    if (context != null) {
+      context.close();
+    }
+  }
+
+  /**
+   * Setup admin privileges for user ADMIN1 and verify the user can create DBs and tables.
+   * @throws Exception
+   */
+//  @Test
+//  public void testServerPrivileges() throws Exception {
+//    String tabName = "tab1";
+//    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+//    client.dropDatabase(dbName, true, true, true);
+//
+//    createMetastoreDB(client, dbName);
+//    createMetastoreTable(client, dbName, tabName,
+//        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+//    assertEquals(1, client.getTables(dbName, tabName).size());
+//
+//    AuthzPathsCache authzPathCache = new AuthzPathsCache(null, new String[]{"/"}, 0);
+//    SentryPolicyServiceClient sentryClient = new SentryServiceClientFactory().create(sentryConf);
+//    waitToCommit(authzPathCache, sentryClient);
+//    assertEquals("/%PREFIX[data%DIR[db_1.db%AUTHZ_OBJECT#db_1[tab1%AUTHZ_OBJECT#db_1.tab1[]]]]", authzPathCache.serializeAllPaths());
+//    client.dropTable(dbName, tabName);
+//    client.dropDatabase(dbName, true, true, true);
+//    waitToCommit(authzPathCache, sentryClient);
+//    assertEquals("/%PREFIX[]", authzPathCache.serializeAllPaths());
+//  }
+//
+//  private void waitToCommit(AuthzPathsCache authzPathCache, SentryPolicyServiceClient sentryClient)
+//      throws Exception {
+//    SentryAuthzUpdate allUpdates = sentryClient.getAllUpdatesFrom(0, 0);
+//    for (HMSUpdate update : allUpdates.pathUpdates) {
+//      authzPathCache.handleUpdateNotification(update);
+//    }
+//    int counter = 0;
+//    while(!authzPathCache.areAllUpdatesCommited()) {
+//      Thread.sleep(200);
+//      counter++;
+//      if (counter > 10000) {
+//        fail("Updates taking too long to commit !!");
+//      }
+//    }
+//  }
+
+  /**
+   * verify non-admin user can not create or drop DB
+   * @throws Exception
+   */
+  @Test
+  public void testNegativeServerPrivileges() throws Exception {
+    HiveMetaStoreClient client = context.getMetaStoreClient(USER1_1);
+    try {
+      createMetastoreDB(client, "fooDb");
+      fail("Create db should have failed for non-admin user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    try {
+      createMetastoreDB(client, "barDb");
+      fail("create db should have failed for non-admin user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+  }
+
+  /**
+   * Verify that a user with DB permission can create a table in that db, and
+   * that a user can't create a table in a DB where they don't have ALL permissions.
+   * @throws Exception
+   */
+  @Test
+  public void testTablePrivileges() throws Exception {
+    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+    createMetastoreTable(client, dbName, tabName1,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    client.close();
+
+    client = context.getMetaStoreClient(USER1_1);
+    createMetastoreTable(client, dbName, tabName2,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    assertEquals(1, client.getTables(dbName, tabName2).size());
+    client.dropTable(dbName, tabName1);
+    createMetastoreTable(client, dbName, tabName1,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    client.close();
+
+    // group2 users can't create the table, but can drop it
+    client = context.getMetaStoreClient(USER2_1);
+    try {
+      createMetastoreTable(client, dbName, "barTab",
+          Lists.newArrayList(new FieldSchema("col1", "int", "")));
+      fail("Create table should have failed for non-privilege user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.dropTable(dbName, tabName2);
+    client.close();
+
+    // group3 users can't create or drop it
+    client = context.getMetaStoreClient(USER3_1);
+    try {
+      createMetastoreTable(client, dbName, "barTab",
+          Lists.newArrayList(new FieldSchema("col1", "int", "")));
+      fail("Create table should have failed for non-privilege user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+
+    try {
+      client.dropTable(dbName, tabName1);
+      fail("drop table should have failed for non-privilege user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+  }
+
+  /**
+   * Verify alter table privileges
+   * @throws Exception
+   */
+  @Test
+  public void testAlterTablePrivileges() throws Exception {
+
+    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+    createMetastoreTable(client, dbName, tabName1,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")));
+    client.close();
+
+    // verify group1 users with DDL privileges can alter tables in db_1
+    client = context.getMetaStoreClient(USER1_1);
+    Table metaTable2 = client.getTable(dbName, tabName1);
+    metaTable2.getSd().setCols(
+        Lists.newArrayList(new FieldSchema("col2", "double", "")));
+    client.alter_table(dbName, tabName1, metaTable2);
+    Table metaTable3 = client.getTable(dbName, tabName1);
+    assertEquals(metaTable2.getSd().getCols(), metaTable3.getSd().getCols());
+
+    // verify group2 users with table-level ALL privileges can alter tables in db_1
+    client = context.getMetaStoreClient(USER2_1);
+    metaTable2 = client.getTable(dbName, tabName1);
+    metaTable2.getSd().setCols(
+        Lists.newArrayList(new FieldSchema("col3", "string", "")));
+    client.alter_table(dbName, tabName1, metaTable2);
+    metaTable3 = client.getTable(dbName, tabName1);
+    assertEquals(metaTable2.getSd().getCols(), metaTable3.getSd().getCols());
+
+    // verify group3 users can't alter tables in db_1
+    client = context.getMetaStoreClient(USER3_1);
+    metaTable2 = client.getTable(dbName, tabName1);
+    metaTable2.getSd().setCols(
+        Lists.newArrayList(new FieldSchema("col3", "string", "")));
+    try {
+      client.alter_table(dbName, tabName1, metaTable2);
+      fail("alter table should have failed for non-privilege user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+  }
+
+  /**
+   * Verify add partition privileges
+   * @throws Exception
+   */
+  @Test
+  public void testAddPartitionPrivileges() throws Exception {
+    ArrayList<String> partVals1 = Lists.newArrayList("part1");
+    ArrayList<String> partVals2 = Lists.newArrayList("part2");
+    ArrayList<String> partVals3 = Lists.newArrayList("part3");
+    ArrayList<String> partVals4 = Lists.newArrayList("part4");
+
+    // user with ALL on DB should be able to add partition
+    HiveMetaStoreClient client = context.getMetaStoreClient(USER1_1);
+    Table tbl1 = createMetastoreTableWithPartition(client, dbName, tabName1,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")),
+        Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
+    assertEquals(1, client.getTables(dbName, tabName1).size());
+    addPartition(client, dbName, tabName1, partVals1, tbl1);
+    addPartition(client, dbName, tabName1, partVals2, tbl1);
+    client.close();
+
+    // user with ALL on Table should be able to add partition
+    client = context.getMetaStoreClient(USER2_1);
+    tbl1 = client.getTable(dbName, tabName1);
+    addPartition(client, dbName, tabName1, partVals3, tbl1);
+    client.close();
+
+    // user without ALL on DB or Table should NOT be able to add partition
+    client = context.getMetaStoreClient(USER3_1);
+    tbl1 = client.getTable(dbName, tabName1);
+    try {
+      addPartition(client, dbName, tabName1, partVals4, tbl1);
+      fail("Add partition should have failed for non-admin user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+
+    // user with ALL on DB should be able to drop partition
+    client = context.getMetaStoreClient(USER1_1);
+    tbl1 = client.getTable(dbName, tabName1);
+    client.dropPartition(dbName, tabName1, partVals1, true);
+    client.close();
+
+    // user with ALL on Table should be able to drop partition
+    client = context.getMetaStoreClient(USER2_1);
+    tbl1 = client.getTable(dbName, tabName1);
+    client.dropPartition(dbName, tabName1, partVals2, true);
+    client.close();
+
+    // user without ALL on DB or Table should NOT be able to drop partition
+    client = context.getMetaStoreClient(USER3_1);
+    try {
+      client.dropPartition(dbName, tabName1, partVals3, true);
+      fail("Drop partition should have failed for non-privilege user");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+  }
+
+  /**
+   * Verify URI privileges for create/alter table with a specific location
+   * @throws Exception
+   */
+  @Test
+  public void testUriTablePrivileges() throws Exception {
+    String newPath1 = "fooTab1";
+    String newPath2 = "fooTab2";
+
+    String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + newPath1;
+    String tabDir2 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + newPath2;
+    policyFile.addRolesToGroup(USERGROUP1, uri_role)
+        .addRolesToGroup(USERGROUP2, uri_role)
+        .addRolesToGroup(USERGROUP3, db_all_role)
+        .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir1)
+        .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir2);
+    writePolicyFile(policyFile);
+
+    // user with URI privileges should be able to create table with that
+    // specific location
+    HiveMetaStoreClient client = context.getMetaStoreClient(USER1_1);
+    createMetastoreTableWithLocation(client, dbName, tabName1,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")), tabDir1);
+
+    createMetastoreTableWithLocation(client, dbName, tabName2,
+        Lists.newArrayList(new FieldSchema("col1", "int", "")), tabDir2);
+    client.close();
+
+    // user without URI privileges should NOT be able to create table with that specific location
+    client = context.getMetaStoreClient(USER3_1);
+    try {
+      createMetastoreTableWithLocation(client, dbName, "fooTab",
+          Lists.newArrayList(new FieldSchema("col1", "int", "")), tabDir2);
+      fail("Create table with location should fail without URI privilege");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+
+    // user with URI privileges should be able to alter table to set that specific location
+    client = context.getMetaStoreClient(USER1_1);
+    Table metaTable1 = client.getTable(dbName, tabName1);
+    metaTable1.getSd().setLocation(tabDir2);
+    client.alter_table(dbName, tabName1, metaTable1);
+    client.close();
+
+    // user with URI privileges and table all should be able to alter table to
+    // set that specific location
+    client = context.getMetaStoreClient(USER2_1);
+    metaTable1 = client.getTable(dbName, tabName2);
+    metaTable1.getSd().setLocation(tabDir1);
+    client.alter_table(dbName, tabName2, metaTable1);
+    client.close();
+
+    // user without URI privileges should NOT be able to alter table to set that specific location
+    client = context.getMetaStoreClient(USER3_1);
+    Table metaTable2 = client.getTable(dbName, tabName2);
+    metaTable2.getSd().setLocation(tabDir2);
+    try {
+      client.alter_table(dbName, tabName2, metaTable2);
+      fail("Alter table with location should fail without URI privilege");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+  }
+
+  /**
+   * Verify URI privileges for adding a partition with a specific location
+   * @throws Exception
+   */
+  @Test
+  public void testUriPartitionPrivileges() throws Exception {
+    String tabName1 = "tab1";
+    String newPath1 = "fooTab1";
+    String newPath2 = "fooTab2";
+    ArrayList<String> partVals1 = Lists.newArrayList("part1");
+    ArrayList<String> partVals2 = Lists.newArrayList("part2");
+    ArrayList<String> partVals3 = Lists.newArrayList("part3");
+
+    String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + newPath1;
+    String tabDir2 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + newPath2;
+    policyFile.addRolesToGroup(USERGROUP1, uri_role)
+        .addRolesToGroup(USERGROUP2, db_all_role)
+        .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir1)
+        .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir2);
+    writePolicyFile(policyFile);
+
+    // user with URI privileges should be able to add a partition with that specific location
+    HiveMetaStoreClient client = context.getMetaStoreClient(USER1_1);
+    Table tbl1 = createMetastoreTableWithPartition(client, dbName,
+        tabName1, Lists.newArrayList(new FieldSchema("col1", "int", "")),
+        Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
+    addPartition(client, dbName, tabName1, partVals1, tbl1);
+    addPartitionWithLocation(client, dbName, tabName1, partVals2, tbl1,
+        tabDir1);
+    client.close();
+
+    // user without URI privileges should NOT be able to add a partition with
+    // that specific location
+    client = context.getMetaStoreClient(USER2_1);
+    try {
+      tbl1 = client.getTable(dbName, tabName1);
+      addPartitionWithLocation(client, dbName, tabName1, partVals3,
+          tbl1, tabDir2);
+      fail("Add partition with location should have failed");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+  }
+
+  /**
+   * Verify alter partition privileges.
+   * TODO: There seems to be a bit of an inconsistency with alter partition: it is
+   * only allowed with the SERVER privilege. If we allow add/drop partition with
+   * DB-level privilege, then this should also be at the same level.
+   * @throws Exception
+   */
+  @Test
+  public void testAlterSetLocationPrivileges() throws Exception {
+    String newPath1 = "fooTab1";
+    ArrayList<String> partVals1 = Lists.newArrayList("part1");
+    ArrayList<String> partVals2 = Lists.newArrayList("part2");
+    String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + newPath1;
+
+    policyFile.addRolesToGroup(USERGROUP1, uri_role)
+        .addRolesToGroup(USERGROUP2, uri_role)
+        .addPermissionsToRole(uri_role, "server=server1->URI=" + tabDir1);
+    writePolicyFile(policyFile);
+
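+    // As admin, create a partitioned table with two partitions for the checks below.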
+    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+    Table tbl1 = createMetastoreTableWithPartition(client, dbName,
+        tabName1, Lists.newArrayList(new FieldSchema("col1", "int", "")),
+        Lists.newArrayList(new FieldSchema("part_col1", "string", "")));
+    addPartition(client, dbName, tabName1, partVals1, tbl1);
+    tbl1 = client.getTable(dbName, tabName1);
+    addPartition(client, dbName, tabName1, partVals2, tbl1);
+    client.close();
+
+    // user with DB and URI privileges should be able to alter partition set location
+    client = context.getMetaStoreClient(USER1_1);
+    Partition newPartition = client.getPartition(dbName, tabName1, partVals1);
+    newPartition.getSd().setLocation(tabDir1);
+    client.alter_partition(dbName, tabName1, newPartition);
+    client.close();
+
+    // user with Table and URI privileges should be able to alter partition set location
+    client = context.getMetaStoreClient(USER2_1);
+    newPartition = client.getPartition(dbName, tabName1, partVals2);
+    newPartition.getSd().setLocation(tabDir1);
+    client.alter_partition(dbName, tabName1, newPartition);
+    client.close();
+
+    policyFile.addRolesToGroup(USERGROUP3, db_all_role);
+    writePolicyFile(policyFile);
+    // user without URI privileges should not be able to alter partition set location
+    client = context.getMetaStoreClient(USER3_1);
+    newPartition = client.getPartition(dbName, tabName1, partVals2);
+    newPartition.getSd().setLocation(tabDir1);
+    try {
+      client.alter_partition(dbName, tabName1, newPartition);
+      fail("alter partition with location should have failed");
+    } catch (MetaException e) {
+      Context.verifyMetastoreAuthException(e);
+    }
+    client.close();
+
+  }
+
+  /**
+   * Verify data load into new partition using INSERT .. PARTITION statement
+   */
+  @Test
+  public void testPartitionInsert() throws Exception {
+    String partVal1 = "part1", partVal2 = "part2";
+
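+    // LOAD DATA LOCAL below reads the local data file, so group1 needs a URI
+    // privilege on that file's path.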
+    policyFile.addRolesToGroup(USERGROUP1, uri_role).addPermissionsToRole(
+        uri_role, "server=server1->uri=file://" + dataFile.getPath());
+    writePolicyFile(policyFile);
+
+    execHiveSQL("CREATE TABLE " + dbName + "." + tabName1
+        + " (id int) PARTITIONED BY (part_col string)", USER1_1);
+    execHiveSQL("CREATE TABLE " + dbName + "." + tabName2 + " (id int)",
+        USER1_1);
+    execHiveSQL("LOAD DATA LOCAL INPATH '" + dataFile.getPath()
+        + "' INTO TABLE " + dbName + "." + tabName2, USER1_1);
+
+    // verify that user with DB all can add partition using INSERT .. PARTITION
+    execHiveSQL("INSERT OVERWRITE TABLE " + dbName + "." + tabName1
+        + " PARTITION (part_col='" + partVal1 + "') SELECT * FROM " + dbName
+        + "." + tabName2, USER1_1);
+    verifyPartitionExists(dbName, tabName1, partVal1);
+
+    // verify that user with Table all can add partition using INSERT
+    execHiveSQL("INSERT OVERWRITE TABLE " + dbName + "." + tabName1
+        + " PARTITION (part_col='" + partVal2 + "') SELECT * FROM " + dbName
+        + "." + tabName2, USER2_1);
+    verifyPartitionExists(dbName, tabName1, partVal2);
+
+    // verify that user with Table all can add dynamic partition using INSERT
+    Map<String, String> dynamicInsertProperties = Maps.newHashMap();
+    dynamicInsertProperties.put(ConfVars.DYNAMICPARTITIONING.varname, "true");
+    dynamicInsertProperties.put(ConfVars.DYNAMICPARTITIONINGMODE.varname,
+        "nonstrict");
+
+    execHiveSQL("CREATE TABLE " + dbName + "." + tabName3
+        + " (id int) PARTITIONED BY (part_col string)", USER1_1);
+    execHiveSQLwithOverlay("INSERT OVERWRITE TABLE " + dbName + "." + tabName3
+        + " partition (part_col) SELECT id, part_col FROM "
+        + dbName + "." + tabName1, USER1_1, dynamicInsertProperties);
+  }
+
+  @Test
+  public void testAddPartition() throws Exception {
+    String partVal1 = "part1", partVal2 = "part2";
+    String newPath1 = "fooTab1";
+    String tabDir1 = hiveServer.getProperty(HiveServerFactory.WAREHOUSE_DIR)
+        + File.separator + newPath1;
+
+    // URI privilege is required when using "location"
+    policyFile.addRolesToGroup(USERGROUP1, uri_role).addPermissionsToRole(
+        uri_role, "server=server1->URI=" + tabDir1);
+    writePolicyFile(policyFile);
+
+    execHiveSQL("DROP TABLE IF EXISTS " + dbName + "." + tabName1, USER1_1);
+    execHiveSQL("CREATE TABLE " + dbName + "." + tabName1
+        + " (id int) PARTITIONED BY (part_col string)", USER1_1);
+
+    //User with all on table
+    execHiveSQL("ALTER TABLE " + dbName + "." + tabName1
+        + " ADD PARTITION (part_col ='" + partVal1 + "')", USER1_1);
+    verifyPartitionExists(dbName, tabName1, partVal1);
+
+    execHiveSQL("ALTER TABLE " + dbName + "." + tabName1
+        + " ADD PARTITION (part_col ='" + partVal2 +  "') location '"
+        + tabDir1 + "'", USER1_1);
+    verifyPartitionExists(dbName, tabName1, partVal2);
+
+    try {
+      execHiveSQL("ALTER TABLE " + dbName + "." + tabName1
+          + " ADD PARTITION (part_col ='" + partVal2 + "') location '"
+          + tabDir1 + "'", USER2_1);
+      fail("alter table should have failed due to missing URI privilege");
+    } catch (IOException e) {
+      // Expected error
+    }
+
+  }
+
+
+  @Test
+  public void testInsertInto() throws Exception {
+    String partVal1 = "part1";
+
+    writePolicyFile(policyFile);
+
+    execHiveSQL("DROP TABLE IF EXISTS " + dbName + "." + tabName1, USER1_1);
+    execHiveSQL("CREATE TABLE " + dbName + "." + tabName1
+        + " (id int) PARTITIONED BY (part_col string)", USER1_1);
+
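+    // group2 has ALL on tab1 (tab1_all_role), so USER2_1 can insert into the new partition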
+    execHiveSQL("INSERT INTO " + dbName + "." + tabName1 +
+        " PARTITION(part_col ='" + partVal1 + "') select 1 from " + dbName + "." + tabName1, USER2_1);
+    verifyPartitionExists(dbName, tabName1, partVal1);
+
+  }
+
+  private void verifyPartitionExists(String dbName, String tabName,
+      String partVal) throws Exception {
+    HiveMetaStoreClient client = context.getMetaStoreClient(ADMIN1);
+    Partition newPartition = client.getPartition(dbName, tabName,
+        Lists.newArrayList(partVal));
+    Assert.assertNotNull(newPartition);
+    client.close();
+  }
+}

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/InternalSentrySrv.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/InternalSentrySrv.java b/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/InternalSentrySrv.java
new file mode 100644
index 0000000..054b193
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/InternalSentrySrv.java
@@ -0,0 +1,270 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.minisentry;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.concurrent.TimeoutException;
+import java.util.concurrent.atomic.AtomicLong;
+
+import org.apache.curator.test.TestingServer;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.sentry.service.thrift.SentryService;
+import org.apache.sentry.service.thrift.SentryServiceFactory;
+import org.apache.sentry.service.thrift.ServiceConstants.ClientConfig;
+import org.apache.sentry.service.thrift.ServiceConstants.ServerConfig;
+import org.apache.thrift.protocol.TProtocol;
+import org.apache.thrift.server.ServerContext;
+import org.apache.thrift.server.TServerEventHandler;
+import org.apache.thrift.transport.TTransport;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+
+public class InternalSentrySrv implements SentrySrv {
+
+  public static class SentryServerContext implements ServerContext {
+    private long contextId;
+
+    public SentryServerContext(long contextId) {
+      this.contextId = contextId;
+    }
+
+    public long getContextId() {
+      return contextId;
+    }
+  }
+
+  /**
+   * Thrift event handler class to track client connections to the Sentry service
+   */
+  public static class SentryThriftEvenHandler implements TServerEventHandler {
+    // unique id for each client connection. We could see multiple simultaneous
+    // client connections, so make it thread safe.
+    private AtomicLong clientId = new AtomicLong();
+    // Lists of clientId currently connected
+    private List<Long> clientList = Lists.newArrayList();
+
+    /**
+     * Thrift callback when a new client is connecting
+     */
+    @Override
+    public ServerContext createContext(TProtocol inputProto,
+        TProtocol outputProto) {
+      clientList.add(clientId.incrementAndGet());
+      LOGGER.info("Client Connected: " + clientId.get());
+      return new SentryServerContext(clientId.get());
+    }
+
+    /**
+     * Thrift callback when a client is disconnecting
+     */
+    @Override
+    public void deleteContext(ServerContext arg0, TProtocol arg1, TProtocol arg2) {
+      clientList.remove(((SentryServerContext) arg0).getContextId());
+      LOGGER.info("Client Disconnected: "
+          + ((SentryServerContext) arg0).getContextId());
+    }
+
+    @Override
+    public void preServe() {
+    }
+
+    @Override
+    public void processContext(ServerContext arg0, TTransport arg1,
+        TTransport arg2) {
+    }
+
+    public long getClientCount() {
+      return clientList.size();
+    }
+
+    public List<Long> getClientList() {
+      return clientList;
+    }
+
+    public long getClientId() {
+      return clientId.get();
+    }
+  }
+
+  private List<SentryService> sentryServers = Lists.newArrayList();
+  private static TestingServer zkServer; // created only in the HA case
+  private static final Logger LOGGER = LoggerFactory
+      .getLogger(InternalSentrySrv.class);
+  private boolean isActive = false;
+
+  public InternalSentrySrv(Configuration sentryConf, int numServers)
+      throws Exception {
+    // Enable HA when numServers is more than 1 and start a Curator TestingServer
+    if (numServers > 1) {
+      zkServer = new TestingServer();
+      zkServer.start();
+      sentryConf.setBoolean(ServerConfig.SENTRY_HA_ENABLED, true);
+      sentryConf.set(ServerConfig.SENTRY_HA_ZOOKEEPER_QUORUM,
+          zkServer.getConnectString());
+    } else if (numServers <= 0) {
+      throw new IllegalArgumentException("Invalid number of Servers: "
+          + numServers + ", must be > 0");
+    }
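+    // Each server gets its own copy of the configuration so the client RPC
+    // address/port can point at that specific instance.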
+    for (int count = 0; count < numServers; count++) {
+      Configuration servConf = new Configuration(sentryConf);
+      SentryService sentryServer = new SentryServiceFactory().create(servConf);
+      servConf.set(ClientConfig.SERVER_RPC_ADDRESS, sentryServer.getAddress()
+          .getHostName());
+      servConf.setInt(ClientConfig.SERVER_RPC_PORT, sentryServer.getAddress()
+          .getPort());
+      sentryServers.add(sentryServer);
+    }
+    isActive = true;
+  }
+
+  @Override
+  public void startAll() throws Exception {
+    if (!isActive) {
+      throw new IllegalStateException("SentrySrv is no longer active");
+    }
+    for (int sentryServerNum = 0; sentryServerNum < sentryServers.size(); sentryServerNum++) {
+      start(sentryServerNum);
+    }
+  }
+
+  @Override
+  public void start(int serverNum) throws Exception {
+    if (!isActive) {
+      throw new IllegalStateException("SentrySrv is no longer active");
+    }
+    SentryService sentryServer = sentryServers.get(serverNum);
+    sentryServer.start();
+
+    // wait for startup
+    final long start = System.currentTimeMillis();
+    while (!sentryServer.isRunning()) {
+      Thread.sleep(1000);
+      if (System.currentTimeMillis() - start > 60000L) {
+        throw new TimeoutException("Server did not start after 60 seconds");
+      }
+    }
+    sentryServer.setThriftEventHandler(new SentryThriftEvenHandler());
+  }
+
+  @Override
+  public void stopAll() throws Exception {
+    boolean cleanStop = true;
+    if (!isActive) {
+      throw new IllegalStateException("SentrySrv is no longer active");
+    }
+    for (int sentryServerNum = 0; sentryServerNum < sentryServers.size(); sentryServerNum++) {
+      try {
+        stop(sentryServerNum);
+      } catch (Exception e) {
+        LOGGER.error("Sentry Server " + sentryServerNum + " failed to stop");
+        cleanStop = false;
+      }
+    }
+    if (!cleanStop) {
+      throw new IllegalStateException(
+          "At least one of the servers failed to stop cleanly");
+    }
+  }
+
+  @Override
+  public void stop(int serverNum) throws Exception {
+    if (!isActive) {
+      throw new IllegalStateException("SentrySrv is no longer active");
+    }
+    SentryService sentryServer = sentryServers.get(serverNum);
+    sentryServer.stop();
+  }
+
+  @Override
+  public void close() {
+    for (SentryService sentryServer : sentryServers) {
+      try {
+        sentryServer.stop();
+      } catch (Exception e) {
+        LOGGER.error("Error stopping Sentry service ", e);
+      }
+    }
+    if (zkServer != null) {
+      try {
+        zkServer.stop();
+      } catch (IOException e) {
+        LOGGER.warn("Error stopping ZK service ", e);
+      }
+    }
+    sentryServers.clear();
+    isActive = false;
+  }
+
+  @Override
+  public SentryService get(int serverNum) {
+    return sentryServers.get(serverNum);
+  }
+
+  @Override
+  public String getZKQuorum() throws Exception {
+    if (zkServer == null) {
+      throw new IOException("Sentry HA is not enabled");
+    }
+    return zkServer.getConnectString();
+  }
+
+  @Override
+  public boolean isHaEnabled() {
+    return zkServer != null;
+  }
+
+  @Override
+  public long getNumActiveClients(int serverNum) {
+    SentryThriftEvenHandler thriftHandler = (SentryThriftEvenHandler) get(
+        serverNum).getThriftEventHandler();
+    LOGGER.warn("Total clients: " + thriftHandler.getClientId());
+    for (Long clientId: thriftHandler.getClientList()) {
+      LOGGER.warn("Got clients: " + clientId);
+    }
+    return thriftHandler.getClientCount();
+  }
+
+  @Override
+  public long getNumActiveClients() {
+    long numClients = 0;
+    for (int sentryServerNum = 0; sentryServerNum < sentryServers.size(); sentryServerNum++) {
+      numClients += getNumActiveClients(sentryServerNum);
+    }
+    return numClients;
+
+  }
+
+  @Override
+  public long getTotalClients() {
+    long totalClients = 0;
+    for (int sentryServerNum = 0; sentryServerNum < sentryServers.size(); sentryServerNum++) {
+      totalClients += getTotalClients(sentryServerNum);
+    }
+    return totalClients;
+  }
+
+  @Override
+  public long getTotalClients(int serverNum) {
+    SentryThriftEvenHandler thriftHandler = (SentryThriftEvenHandler) get(
+        serverNum).getThriftEventHandler();
+    return thriftHandler.getClientId();
+  }
+}

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrv.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrv.java b/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrv.java
new file mode 100644
index 0000000..dac1151
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrv.java
@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.minisentry;
+
+import org.apache.sentry.service.thrift.SentryService;
+
+public interface SentrySrv {
+
+  /**
+   * Start all the sentry services
+   * @throws Exception
+   */
+  void startAll() throws Exception;
+
+  /**
+   * Start the given server
+   * @param serverNum
+   *          - Server number (0 to N-1)
+   * @throws Exception
+   */
+  void start(int serverNum) throws Exception ;
+
+  /**
+   * Stop all the Sentry servers
+   * @throws Exception
+   */
+  void stopAll() throws Exception;
+
+  /**
+   * Stop the specified Sentry server
+   * @param serverNum
+   *          - Server number (0 to N-1)
+   * @throws Exception
+   */
+  void stop(int serverNum) throws Exception ;
+
+  /**
+   * Get the underlying Sentry service object
+   * @param serverNum
+   *          - Server number (0 to N-1)
+   * @return the underlying SentryService instance
+   */
+  SentryService get(int serverNum);
+
+  /**
+   * Get the ZK connection string
+   * @return the ZooKeeper connection string
+   * @throws Exception
+   *           - If HA is not enabled
+   */
+  String getZKQuorum() throws Exception;
+
+  /**
+   * Stop all the nodes and ZK if started. The SentrySrv can't be reused once
+   * closed.
+   */
+  void close();
+
+  /**
+   * Check if the sentry server is created with HA enabled.
+   * @return true if HA is enabled, false otherwise
+   */
+  boolean isHaEnabled();
+
+  /**
+   * Get the number of active client connections across all servers
+   */
+  long getNumActiveClients();
+
+  /**
+   * Get the number of active client connections for the given server
+   */
+  long getNumActiveClients(int serverNum);
+
+  /**
+   * Get the total number of clients connected so far
+   */
+  long getTotalClients();
+
+  /**
+   * Get the total number of clients connected so far
+   */
+  long getTotalClients(int serverNum);
+
+}

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrvFactory.java
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrvFactory.java b/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrvFactory.java
new file mode 100644
index 0000000..9381e88
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/java/org/apache/sentry/tests/e2e/minisentry/SentrySrvFactory.java
@@ -0,0 +1,45 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.sentry.tests.e2e.minisentry;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+
+public class SentrySrvFactory {
+
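+  // Typical test usage (a sketch; variable names are illustrative):
+  //   SentrySrv srv = SentrySrvFactory.create(SentrySrvType.INTERNAL_SERVER, sentryConf, 2);
+  //   srv.startAll();
+  //   ... exercise srv.get(0) / srv.get(1) ...
+  //   srv.close();
+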
+  public static enum SentrySrvType {
+    INTERNAL_SERVER, EXTERNAL_SERVER
+  }
+
+  public static SentrySrv create(SentrySrvType srvType,
+      Configuration sentryConf)
+          throws Exception {
+    return create(srvType, sentryConf, 1);
+  }
+
+  public static SentrySrv create(SentrySrvType srvType,
+      Configuration sentryConf,
+      int numServers) throws Exception {
+    if (!srvType.equals(SentrySrvType.INTERNAL_SERVER)) {
+      throw new IOException("Server type " + srvType.name()
+          + " is not supported");
+    }
+    return new InternalSentrySrv(sentryConf, numServers);
+  }
+}

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/resources/core-site-for-sentry-test.xml
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/resources/core-site-for-sentry-test.xml b/sentry-tests/sentry-tests-hive-v2/src/test/resources/core-site-for-sentry-test.xml
new file mode 100644
index 0000000..01b8576
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/resources/core-site-for-sentry-test.xml
@@ -0,0 +1,34 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
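+  <!-- Use the test's pseudo group mapping service so user-to-group lookups do not go through the OS. -->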
+  <property>
+    <name>hadoop.security.group.mapping</name>
+    <value>org.apache.sentry.tests.e2e.hive.fs.MiniDFS$PseudoGroupMappingService</value>
+  </property>
+  <property>
+    <name>fs.permissions</name>
+    <value>false</value>
+  </property>
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>000</value>
+  </property>
+</configuration>
+

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/resources/emp.dat
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/resources/emp.dat b/sentry-tests/sentry-tests-hive-v2/src/test/resources/emp.dat
new file mode 100644
index 0000000..5922b20
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/resources/emp.dat
@@ -0,0 +1,12 @@
+16|john
+17|robert
+18|andrew
+19|katty
+21|tom
+22|tim
+23|james
+24|paul
+27|edward
+29|alan
+31|kerry
+34|terri
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/resources/hadoop
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/resources/hadoop b/sentry-tests/sentry-tests-hive-v2/src/test/resources/hadoop
new file mode 100755
index 0000000..914d3db
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/resources/hadoop
@@ -0,0 +1,107 @@
+#!/usr/bin/env bash
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script runs the hadoop core commands. 
+
+bin=`which $0`
+bin=`dirname ${bin}`
+bin=`cd "$bin"; pwd`
+ 
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# check envvars which might override default args
+if [ "$HADOOP_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$HADOOP_HEAPSIZE""m"
+fi
+
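+# The test harness is expected to supply the Hadoop/Sentry jars through
+# SENTRY_HADOOP_TEST_CLASSPATH; without it this wrapper refuses to run.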
+if [ "$SENTRY_HADOOP_TEST_CLASSPATH" != "" ]; then
+  CLASSPATH=${CLASSPATH}:${SENTRY_HADOOP_TEST_CLASSPATH}
+  echo "Got Sentry classpath ${SENTRY_HADOOP_TEST_CLASSPATH}"
+else    
+  echo "Error: SENTRY_HADOOP_TEST_CLASSPATH not defined."
+  exit 1  
+fi
+DEFAULT_LIBEXEC_DIR="$bin"/../libexec
+HADOOP_LIBEXEC_DIR=${HADOOP_LIBEXEC_DIR:-$DEFAULT_LIBEXEC_DIR}
+
+function print_usage(){
+  echo "Usage: hadoop [--config confdir] COMMAND"
+  echo "       where COMMAND is one of:"
+  echo "  fs                   run a generic filesystem user client"
+  echo "  version              print the version"
+  echo "  jar <jar>            run a jar file"
+  echo ""
+  echo "Most commands print help when invoked w/o parameters."
+}
+
+if [ $# = 0 ]; then
+  print_usage
+  exit
+fi
+
+COMMAND=$1
+case $COMMAND in
+  # usage flags
+  --help|-help|-h)
+    print_usage
+    exit
+    ;;
+
+
+  classpath)
+    echo $CLASSPATH
+    exit
+    ;;
+
+  #core commands  
+  *)
+    # the core commands
+    if [ "$COMMAND" = "fs" ] ; then
+      CLASS=org.apache.hadoop.fs.FsShell
+    elif [ "$COMMAND" = "version" ] ; then
+      CLASS=org.apache.hadoop.util.VersionInfo
+    elif [ "$COMMAND" = "jar" ] ; then
+      CLASS=org.apache.hadoop.util.RunJar
+    elif [ "$COMMAND" = "checknative" ] ; then
+      CLASS=org.apache.hadoop.util.NativeLibraryChecker
+    elif [ "$COMMAND" = "distcp" ] ; then
+      CLASS=org.apache.hadoop.tools.DistCp
+      CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+    elif [ "$COMMAND" = "archive" ] ; then
+      CLASS=org.apache.hadoop.tools.HadoopArchives
+      CLASSPATH=${CLASSPATH}:${TOOL_PATH}
+    elif [[ "$COMMAND" = -*  ]] ; then
+        # class and package names cannot begin with a -
+        echo "Error: No command named \`$COMMAND' was found. Perhaps you meant \`hadoop ${COMMAND#-}'"
+        exit 1
+    else
+      CLASS=$COMMAND
+    fi
+    shift
+    
+    # Always respect HADOOP_OPTS and HADOOP_CLIENT_OPTS
+    HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"
+
+    #make sure security appender is turned off
+    HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+
+    export CLASSPATH=$CLASSPATH
+    exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
+    ;;
+
+esac

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/resources/kv1.dat
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/resources/kv1.dat b/sentry-tests/sentry-tests-hive-v2/src/test/resources/kv1.dat
new file mode 100644
index 0000000..20fb0dc
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/resources/kv1.dat
@@ -0,0 +1,500 @@
+238
+86
+311
+27
+165
+409
+255
+278
+98
+484
+265
+193
+401
+150
+273
+224
+369
+66
+128
+213
+146
+406
+429
+374
+152
+469
+145
+495
+37
+327
+281
+277
+209
+15
+82
+403
+166
+417
+430
+252
+292
+219
+287
+153
+193
+338
+446
+459
+394
+237
+482
+174
+413
+494
+207
+199
+466
+208
+174
+399
+396
+247
+417
+489
+162
+377
+397
+309
+365
+266
+439
+342
+367
+325
+167
+195
+475
+17
+113
+155
+203
+339
+0
+455
+128
+311
+316
+57
+302
+205
+149
+438
+345
+129
+170
+20
+489
+157
+378
+221
+92
+111
+47
+72
+4
+280
+35
+427
+277
+208
+356
+399
+169
+382
+498
+125
+386
+437
+469
+192
+286
+187
+176
+54
+459
+51
+138
+103
+239
+213
+216
+430
+278
+176
+289
+221
+65
+318
+332
+311
+275
+137
+241
+83
+333
+180
+284
+12
+230
+181
+67
+260
+404
+384
+489
+353
+373
+272
+138
+217
+84
+348
+466
+58
+8
+411
+230
+208
+348
+24
+463
+431
+179
+172
+42
+129
+158
+119
+496
+0
+322
+197
+468
+393
+454
+100
+298
+199
+191
+418
+96
+26
+165
+327
+230
+205
+120
+131
+51
+404
+43
+436
+156
+469
+468
+308
+95
+196
+288
+481
+457
+98
+282
+197
+187
+318
+318
+409
+470
+137
+369
+316
+169
+413
+85
+77
+0
+490
+87
+364
+179
+118
+134
+395
+282
+138
+238
+419
+15
+118
+72
+90
+307
+19
+435
+10
+277
+273
+306
+224
+309
+389
+327
+242
+369
+392
+272
+331
+401
+242
+452
+177
+226
+5
+497
+402
+396
+317
+395
+58
+35
+336
+95
+11
+168
+34
+229
+233
+143
+472
+322
+498
+160
+195
+42
+321
+430
+119
+489
+458
+78
+76
+41
+223
+492
+149
+449
+218
+228
+138
+453
+30
+209
+64
+468
+76
+74
+342
+69
+230
+33
+368
+103
+296
+113
+216
+367
+344
+167
+274
+219
+239
+485
+116
+223
+256
+263
+70
+487
+480
+401
+288
+191
+5
+244
+438
+128
+467
+432
+202
+316
+229
+469
+463
+280
+2
+35
+283
+331
+235
+80
+44
+193
+321
+335
+104
+466
+366
+175
+403
+483
+53
+105
+257
+406
+409
+190
+406
+401
+114
+258
+90
+203
+262
+348
+424
+12
+396
+201
+217
+164
+431
+454
+478
+298
+125
+431
+164
+424
+187
+382
+5
+70
+397
+480
+291
+24
+351
+255
+104
+70
+163
+438
+119
+414
+200
+491
+237
+439
+360
+248
+479
+305
+417
+199
+444
+120
+429
+169
+443
+323
+325
+277
+230
+478
+178
+468
+310
+317
+333
+493
+460
+207
+249
+265
+480
+83
+136
+353
+172
+214
+462
+233
+406
+133
+175
+189
+454
+375
+401
+421
+407
+384
+256
+26
+134
+67
+384
+379
+18
+462
+492
+100
+298
+9
+341
+498
+146
+458
+362
+186
+285
+348
+167
+18
+273
+183
+281
+344
+97
+469
+315
+84
+28
+37
+448
+152
+348
+307
+194
+414
+477
+222
+126
+90
+169
+403
+400
+200
+97

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/resources/log4j.properties
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/resources/log4j.properties b/sentry-tests/sentry-tests-hive-v2/src/test/resources/log4j.properties
new file mode 100644
index 0000000..d941816
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/resources/log4j.properties
@@ -0,0 +1,35 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+# Define some default values that can be overridden by system properties.
+#
+# For testing, it may also be convenient to specify
+
+sentry.root.logger=INFO,console
+log4j.rootLogger=${sentry.root.logger}
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d (%t) [%p - %l] %m%n
+
+log4j.logger.org.apache.hadoop.conf.Configuration=ERROR
+log4j.logger.org.apache.sentry=DEBUG
+
+log4j.category.DataNucleus=ERROR

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/resources/sentry-provider.ini
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/resources/sentry-provider.ini b/sentry-tests/sentry-tests-hive-v2/src/test/resources/sentry-provider.ini
new file mode 100644
index 0000000..014d827
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/resources/sentry-provider.ini
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[groups]
+foo = all_default
+
+[roles]
+all_default = server=server1->db=default
+
+[users]
+foo = foo

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/resources/sentry-site.xml
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/resources/sentry-site.xml b/sentry-tests/sentry-tests-hive-v2/src/test/resources/sentry-site.xml
new file mode 100644
index 0000000..d068011
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/resources/sentry-site.xml
@@ -0,0 +1,33 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+  <property>
+    <name>sentry.provider</name>
+    <value>invalid</value>
+  </property>
+  <property>
+    <name>sentry.hive.provider.resource</name>
+    <value>invalid</value>
+  </property>
+  <property>
+    <name>sentry.hive.server</name>
+    <value>myHS2</value>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImport.ini
----------------------------------------------------------------------
diff --git a/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImport.ini b/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImport.ini
new file mode 100644
index 0000000..15fc5bf
--- /dev/null
+++ b/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImport.ini
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
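+# A rough sketch of how this policy file is structured (based on the entries below):
+# [groups] maps each group to a comma-separated list of roles, and [roles] maps
+# each role to comma-separated privilege strings of the general form
+# server=<server>[->db=<db>[->table=<table>[->column=<column>]]][->uri=<uri>][->action=<action>][->grantoption=<true|false>]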
+[groups]
+group1=roleImport1,roleImport2
+group2=roleImport1,roleImport2,roleImport3
+group3=roleImport2,roleImport3
+[roles]
+roleImport1=server=server1,server=server1->action=select->grantoption=false,server=server1->db=db2->action=insert->grantoption=true,server=server1->db=db1->table=tbl1->action=insert
+roleImport2=server=server1->db=db2->action=insert->grantoption=true,server=server1->db=db1->table=tbl1->action=insert,server=server1->db=db1->table=tbl2->column=col1->action=insert,server=server1->db=db1->table=tbl3->column=col1->action=*->grantoption=true
+roleImport3=server=server1->db=db1->table=tbl2->column=col1->action=insert,server=server1->db=db1->table=tbl3->column=col1->action=*->grantoption=true,server=server1->db=db1->table=tbl4->column=col1->action=all->grantoption=true,server=server1->uri=hdfs://testserver:9999/path2->action=insert

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImportAdmin.ini
----------------------------------------------------------------------
diff --git 
a/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImportAdmin.ini
 
b/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImportAdmin.ini
new file mode 100644
index 0000000..c778d05
--- /dev/null
+++ 
b/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImportAdmin.ini
@@ -0,0 +1,22 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[groups]
+admin=adminRole
+
+[roles]
+adminRole=server=server1

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImportError.ini
----------------------------------------------------------------------
diff --git 
a/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImportError.ini
 
b/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImportError.ini
new file mode 100644
index 0000000..4d53f2b
--- /dev/null
+++ 
b/sentry-tests/sentry-tests-hive-v2/src/test/resources/testPolicyImportError.ini
@@ -0,0 +1,21 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+[groups]
+group1=roleImport1
+[roles]
+roleImport1=server->db=db_1
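+# Note: the privilege string above appears intentionally malformed (the server part
+# lacks "=server1"), presumably so the import test can verify error handling.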

http://git-wip-us.apache.org/repos/asf/sentry/blob/bfb354f2/sentry-tests/sentry-tests-hive-v2/src/test/scripts/scale-test/create-many-dbs-tables.sh
----------------------------------------------------------------------
diff --git 
a/sentry-tests/sentry-tests-hive-v2/src/test/scripts/scale-test/create-many-dbs-tables.sh
 
b/sentry-tests/sentry-tests-hive-v2/src/test/scripts/scale-test/create-many-dbs-tables.sh
new file mode 100755
index 0000000..dcdddeb
--- /dev/null
+++ 
b/sentry-tests/sentry-tests-hive-v2/src/test/scripts/scale-test/create-many-dbs-tables.sh
@@ -0,0 +1,277 @@
+#!/usr/bin/env bash
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+
+# This script creates many test objects (databases, tables, partitions,
+# and wide tables with many partitions). To run it:
+# export HS2="HOSTNAME"
+# export REALM="REALM.NAME"
+# bash /root/tests/create-many-dbs-tables.sh &
+# bash /root/tests/create-many-dbs-tables.sh &
+
+if [[ ${HS2} == "" ]]; then
+  echo "error: need to export HS2=hostname"
+  exit 1
+fi
+
+if [[ ${REALM} == "" ]]; then
+  echo "error: need to export REALM"
+  exit 1
+fi
+
+# Define default test scale
+NUM_OF_DATABASES=60
+NUM_OF_TABLES_PER_DATABASE=20
+NUM_OF_ROLES_FOR_DATABASES=60 # <= NUM_OF_DATABASES
+NUM_OF_ROLES_FOR_TABLES_PER_DATABASE=5 # <= NUM_OF_TABLES_PER_DATABASE
+NUM_OF_GROUPS=60 # >= NUM_OF_DATABASES
+
+# Number of partitions varies between max and min
+MAX_NUM_OF_PARTITIONS_PER_TABLE=10
+MIN_NUM_OF_PARTITIONS_PER_TABLE=2
+
+BASE_EXTERNAL_DIR="/data"
+LOCAL_OUTPUT_DIR="/tmp"
+BL="beeline -n hive -p hive --silent=true -u 
'jdbc:hive2://${HS2}:10000/default;principal=hive/_HOST@${REALM}'"
+
+# Number of external partitions wide tables have
+declare -a NUM_OF_WIDE_TABLE_PARTITIONS=(10 100 1000)
+wLen=${#NUM_OF_WIDE_TABLE_PARTITIONS[@]}
+
+process_id=$$
+
+while getopts "d:t:g:b:l:" OPTION
+do
+  case "${OPTION}" in
+    b) BASE_EXTERNAL_DIR="$OPTARG";;
+    d) NUM_OF_DATABASES="$OPTARG";;
+    l) LOCAL_OUTPUT_DIR="$OPTARG";;
+    t) NUM_OF_TABLES_PER_DATABASE="$OPTARG";;
+    g) NUM_OF_GROUPS="$OPTARG";;
+    [?]) echo "Usage: $0 [-b BASE_EXTERNAL_DIR] [-d NUM_OF_DATABASES] [-l LOCAL_OUTPUT_DIR] [-t NUM_OF_TABLES_PER_DATABASE] [-g NUM_OF_GROUPS]" >&2
+         exit 1;;
+  esac
+done
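+# Example invocation overriding the defaults (values here are illustrative only):
+#   bash create-many-dbs-tables.sh -d 100 -t 50 -g 100 -b /data -l /tmp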
+
+NUM_OF_PERMISSIONS=$(( NUM_OF_ROLES_FOR_DATABASES + NUM_OF_ROLES_FOR_TABLES_PER_DATABASE * NUM_OF_DATABASES))
+AVG_NUM_OF_PARTITIONS_PER_TABLE=$((( MAX_NUM_OF_PARTITIONS_PER_TABLE + MIN_NUM_OF_PARTITIONS_PER_TABLE) / 2 ))
+
+echo "[${process_id}]  Scale numbers:"
+echo "[${process_id}]  number of databases: ${NUM_OF_DATABASES}"
+echo "[${process_id}]  number of tables: $((NUM_OF_TABLES_PER_DATABASE * 
NUM_OF_DATABASES))"
+echo "[${process_id}]  number of wide tables: ${wLen}"
+echo "[${process_id}]  number of partitions per table: 
${AVG_NUM_OF_PARTITIONS_PER_TABLE}"
+echo "[${process_id}]  number of min partitions per wide table: 
${NUM_OF_WIDE_TABLE_PARTITIONS[0]}"
+echo "[${process_id}]  number of max partitions per wide table: 
${NUM_OF_WIDE_TABLE_PARTITIONS[${wLen}-1]}"
+echo "[${process_id}]  number of permissions: ${NUM_OF_PERMISSIONS}"
+echo "[${process_id}]  number of groups: ${NUM_OF_GROUPS}"
+
+# Random string as prefix for test databases and tables
+prefix_string=$(cat /dev/urandom | tr -dc 'a-z' | fold -w 4 | head -n 1)
+prefix_string=${prefix_string}$(date +%s | cut -c1-4)
+
+DB_NAME=${prefix_string}_db
+
+function validate_ret () {
+  ret=$1
+  if [[ ${ret} != "" && ${ret} -ne 0 ]]; then
+    echo "ERROR!! when running query in bulk mode"
+    exit $ret
+  fi
+}
+
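+# Map a numeric index to one of the NUM_OF_GROUPS group names (round-robin)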
+function get_group () {
+  count=$1
+  group_name=group_$((count % NUM_OF_GROUPS))
+  echo "$group_name"
+}
+
+# Create groups
+function create_groups () {
+  for g in $(seq ${NUM_OF_GROUPS}); do
+    group_name=$(get_group $g)
+    getent passwd ${group_name} >/dev/null 2>&1
+    if [[ $? -ne 0 ]]; then
+      sudo groupadd ${group_name}
+      sudo useradd -g ${group_name} ${group_name}
+    fi
+  done
+}
+
+# Convenience function to create one table with many external partitions
+function create_wide_table () {
+  db_name=$1
+  tbl_name=$2
+  num_of_pars=$3
+  file_name=$4
+  dir_file_name=$5
+  echo "-- [${process_id}]     Create ${tbl_name} in ${db_name} with 
${num_of_pars} external partitions; " >> ${file_name}
+  echo "CREATE DATABASE IF NOT EXISTS ${db_name}; " >> ${file_name}
+  echo "USE ${db_name};" >> ${file_name}
+  table_dir=${BASE_EXTERNAL_DIR}/${db_name}/${tbl_name}
+  echo "sudo -u hdfs hdfs dfs -rm -R -skipTrash ${table_dir} 2>/dev/null" >> 
${dir_file_name}
+  echo "DROP TABLE IF EXISTS ${tbl_name}; " >> ${file_name}
+  echo "CREATE TABLE ${tbl_name} (s STRING, i INT) PARTITIONED BY (par INT);" 
>> ${file_name}
+  echo "-- create ${num_of_pars} partitions on table ${tbl_name}" >> 
${file_name}
+  for p in $(seq ${num_of_pars}); do
+    dir=${table_dir}/$p
+    echo "sudo -u hdfs hdfs dfs -mkdir -p ${dir}" >> ${dir_file_name}
+    echo "ALTER TABLE ${tbl_name} ADD PARTITION (par=$p) LOCATION '${dir}';" 
>> ${file_name}
+  done
+}
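+# For reference, a sketch of what create_wide_table appends for one partition
+# (database/table names and paths below are illustrative, built from the
+# variables above with the default BASE_EXTERNAL_DIR=/data):
+#   CREATE TABLE <db>_wide_tbl_0 (s STRING, i INT) PARTITIONED BY (par INT);
+#   ALTER TABLE <db>_wide_tbl_0 ADD PARTITION (par=1) LOCATION '/data/<db>/<db>_wide_tbl_0/1';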
+
+# Convenience function to create wide tables with many external partitions
+function create_external_par_dirs_bulk_file () {
+  file_name=$1
+  dir_file_name=$2
+  echo "-- [${process_id}]     Start bulk process to create wide tables" > 
${file_name}
+  echo "# [${process_id}]      Start to create external dirs for partitions" > 
${dir_file_name}
+  db_id=$(awk -v n="${NUM_OF_DATABASES}" 'BEGIN{srand();print 
int(rand()*n+1)}')
+  db_name=${DB_NAME}_${db_id}
+  for p in "${!NUM_OF_WIDE_TABLE_PARTITIONS[@]}"; do
+    tbl_name=${db_name}_wide_tbl_$p
+         create_wide_table ${db_name} ${tbl_name} 
${NUM_OF_WIDE_TABLE_PARTITIONS[p]} ${file_name} ${dir_file_name}
+  done
+  chmod a+x ${file_name}
+  chmod a+x ${dir_file_name}
+}
+
+# Create internal databases and their tables in one bulk file
+function create_dbs_tbls_bulk_file () {
+  file_name=$1
+  echo "-- [${process_id}]     start bulk load " > ${file_name}
+  for d in $(seq ${NUM_OF_DATABASES}); do
+    db_name=${DB_NAME}_${d}
+    echo "drop database if exists ${db_name}; " >> ${file_name}
+    echo "create database ${db_name}; " >> ${file_name}
+    echo "use ${db_name};" >> ${file_name}
+    NUM_OF_COLS=$(awk -v mn="${MIN_NUM_OF_PARTITIONS_PER_TABLE}" -v mx="${MAX_NUM_OF_PARTITIONS_PER_TABLE}" 'BEGIN{srand();print int(rand()*(mx-mn+1))+mn}')
+    NUM_OF_PARS=$(awk -v mn="${MIN_NUM_OF_PARTITIONS_PER_TABLE}" -v mx="${MAX_NUM_OF_PARTITIONS_PER_TABLE}" 'BEGIN{srand();print int(rand()*(mx-mn+1))+mn}')
+
+    for t in $(seq ${NUM_OF_TABLES_PER_DATABASE}); do
+      tbl_name=${db_name}_tbl_${t}
+      # create table
+      echo "create table ${tbl_name} (col_start INT, " >> ${file_name}
+      for c in $(seq ${NUM_OF_COLS}); do
+        echo "col_${c} STRING, " >> ${file_name}
+      done
+      echo "col_end INT) partitioned by (par_start STRING, " >> ${file_name}
+      # create many partitions
+      for p in $(seq ${NUM_OF_PARS}); do
+        echo "par_${p} INT, " >> ${file_name}
+      done
+      echo "par_end STRING); " >> ${file_name}
+    done
+  done
+  chmod a+x ${file_name}
+}
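+# For reference, each generated CREATE TABLE statement looks roughly like
+# (column and partition counts are randomized per database):
+#   create table <db>_tbl_1 (col_start INT, col_1 STRING, ..., col_end INT)
+#     partitioned by (par_start STRING, par_1 INT, ..., par_end STRING);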
+
+# Create database roles
+function create_dbs_roles () {
+  db_file_name=$1
+  total_db_permissions=0
+  echo "-- [${process_id}] Start to create database roles" > ${db_file_name}
+  for d in $(seq ${NUM_OF_ROLES_FOR_DATABASES}); do
+    db_name=${DB_NAME}_${d}
+    role_name=${db_name}_db_role_${d}
+    group_name=$(get_group $d)
+    echo "create role ${role_name}; " >> ${db_file_name}
+    echo "grant all on database ${db_name} to role ${role_name}; " >> 
${db_file_name}
+    echo "grant ${role_name} to group ${group_name};" >> ${db_file_name}
+  done
+  chmod a+x ${db_file_name}
+}
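+# For reference, each database role is wired up roughly as:
+#   create role <db>_db_role_<n>;
+#   grant all on database <db> to role <db>_db_role_<n>;
+#   grant <db>_db_role_<n> to group group_<m>;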
+
+# Create table roles
+function create_tbls_roles () {
+  tbl_file_name=$1
+  echo "-- [${process_id}] Start to create table roles;" > ${tbl_file_name}
+  # create table roles
+  for d in $(seq ${NUM_OF_DATABASES}); do
+    db_name=${DB_NAME}_${d}
+    echo "USE ${db_name};" >> ${tbl_file_name}
+    for t in $(seq ${NUM_OF_ROLES_FOR_TABLES_PER_DATABASE}); do
+      tbl_name=${db_name}_tbl_${t}
+      role_name=${tbl_name}_role_${t}
+      echo "CREATE ROLE ${role_name};" >> ${tbl_file_name}
+      rand_number=$(awk 'BEGIN{srand();print int(rand()*3)}')
+      case "$((rand_number % 3))" in
+          0) echo "grant all on table ${tbl_name} to role ${role_name}; " >> ${tbl_file_name}
+             ;;
+          1) echo "grant insert on table ${tbl_name} to role ${role_name}; " >> ${tbl_file_name}
+             ;;
+          *) echo "grant select on table ${tbl_name} to role ${role_name}; " >> ${tbl_file_name}
+             ;;
+      esac
+      group_name=$(get_group $d)
+      echo "grant role ${role_name} to group ${group_name}; " >> ${tbl_file_name}
+    done
+  done
+  chmod a+x ${tbl_file_name}
+}
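+# For reference, each table role gets one randomly chosen privilege
+# (all / insert / select) on its table and is then granted to a group, e.g.:
+#   CREATE ROLE <tbl>_role_<n>;
+#   grant select on table <tbl> to role <tbl>_role_<n>;
+#   grant role <tbl>_role_<n> to group group_<m>;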
+
+###########################
+# Start from here!
+###########################
+create_groups
+echo "# [${process_id}]        Created ${NUM_OF_GROUPS} groups"
+
+# Use Hive to create the partitions because it supports bulk adding of partitions.
+# Hive doesn't allow fully qualified table names in ALTER statements, so start with a USE <db>.
+create_tables_file_name=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}_bulk_tables.q
+create_dbs_tbls_bulk_file ${create_tables_file_name}
+echo "# [${process_id}]        Created ${create_tables_file_name} to create 
databases and tables in bulk mode"
+
+create_wide_tables_file_name=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}_bulk_wide_tables.q
+create_wide_tables_dir_file_name=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}_bulk_wide_tables_dirs.sh
+create_external_par_dirs_bulk_file ${create_wide_tables_file_name} 
${create_wide_tables_dir_file_name}
+echo "# [${process_id}]        Created ${create_wide_tables_file_name} to 
create wide tables with external partitions in bulk mode"
+echo "# [${process_id}]        Created ${create_wide_tables_dir_file_name} to 
create external dirs for external partitions in bulk mode"
+
+create_db_role_file_name=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}_bulk_db_roles.q
+create_dbs_roles ${create_db_role_file_name}
+echo "# [${process_id}]        Created ${create_db_role_file_name} to create 
database roles"
+
+create_tbl_role_file_name=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}_bulk_tbl_roles.q
+create_tbls_roles ${create_tbl_role_file_name}
+echo "# [${process_id}]        Created ${create_tbl_role_file_name} to create 
table roles"
+
+sudo -u hive hive -S -f ${create_tables_file_name}
+validate_ret $?
+echo "# [${process_id}]        Succeessfully ran bulk file 
${create_tables_file_name} to create databases and tables"
+
+. ${create_wide_tables_dir_file_name}
+echo "# [${process_id}]        Successfully ran 
${create_wide_tables_dir_file_name} to create dirs for external partitions"
+
+sudo -u hive hive -S -f ${create_wide_tables_file_name}
+validate_ret $?
+echo "# [${process_id}]        Successfully ran bulk file 
${create_wide_tables_file_name} to create wide tables with external partitions"
+
+sudo -u hive ${BL} -f ${create_db_role_file_name} 1>/dev/null # to remove white lines after execution
+validate_ret $?
+echo "# [${process_id}]        Successfully created database level roles and privileges"
+
+sudo -u hive ${BL} -f ${create_tbl_role_file_name} 1>/dev/null # to remove white lines after execution
+validate_ret $?
+echo "# [${process_id}]        Successfully created table level roles and privileges"
+
+res_file=${LOCAL_OUTPUT_DIR}/hive_${prefix_string}.res
+echo "-- [${process_id}]       List all databases and roles in ${res_file}" > 
${res_file}
+sudo -u hive  ${BL} -e "show databases" 2>/dev/null 1>>${res_file}
+sudo -u hive  ${BL} -e "show roles" 2>/dev/null 1>>${res_file}
+echo "[${process_id}]  Successfully listed all databases and roles in 
${res_file}"
