Repository: sentry Updated Branches: refs/heads/master 0adf29344 -> d4165e423
SENTRY-583: Add boundary condition test coverage to HDFS synchronization test suite around max #of groups. (Anne Yu, reviewed by Haohao) Project: http://git-wip-us.apache.org/repos/asf/sentry/repo Commit: http://git-wip-us.apache.org/repos/asf/sentry/commit/d4165e42 Tree: http://git-wip-us.apache.org/repos/asf/sentry/tree/d4165e42 Diff: http://git-wip-us.apache.org/repos/asf/sentry/diff/d4165e42 Branch: refs/heads/master Commit: d4165e423d57a230f8982c8dd795ac1d9dd48d0d Parents: 0adf293 Author: Anne Yu <[email protected]> Authored: Thu Apr 21 11:29:37 2016 -0700 Committer: Anne Yu <[email protected]> Committed: Wed May 4 14:43:46 2016 -0700 ---------------------------------------------------------------------- .../sentry/tests/e2e/hdfs/TestDbHdfsBase.java | 341 +++++++++++++++++++ .../tests/e2e/hdfs/TestDbHdfsExtMaxGroups.java | 102 ++++++ .../tests/e2e/hdfs/TestDbHdfsMaxGroups.java | 197 +++++++++++ .../AbstractTestWithStaticConfiguration.java | 10 +- .../tests/e2e/hive/PrivilegeResultSet.java | 7 +- .../sentry/tests/e2e/hive/fs/DFSFactory.java | 9 +- .../sentry/tests/e2e/hive/fs/MiniDFS.java | 14 +- 7 files changed, 673 insertions(+), 7 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/sentry/blob/d4165e42/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsBase.java ---------------------------------------------------------------------- diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsBase.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsBase.java new file mode 100644 index 0000000..001f5a4 --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsBase.java @@ -0,0 +1,341 @@ +package org.apache.sentry.tests.e2e.hdfs; + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + + import java.io.IOException; + import java.security.PrivilegedExceptionAction; + import java.sql.Connection; + import java.sql.Statement; + import java.util.ArrayList; + import java.util.List; + + import com.google.common.base.Strings; + + import org.apache.hadoop.conf.Configuration; + import org.apache.hadoop.fs.FileStatus; + import org.apache.hadoop.fs.FileSystem; + import org.apache.hadoop.fs.Path; + import org.apache.hadoop.fs.permission.AclEntry; + import org.apache.hadoop.fs.permission.AclStatus; + import org.apache.hadoop.hive.conf.HiveConf; + import org.apache.hadoop.security.UserGroupInformation; + + import static org.apache.sentry.tests.e2e.hive.fs.DFSFactory.DFSType; + import static org.apache.sentry.tests.e2e.hive.hiveserver.HiveServerFactory.HiveServer2Type; + + import org.junit.After; + import org.junit.BeforeClass; + + import static org.hamcrest.Matchers.equalTo; + import static org.hamcrest.Matchers.equalToIgnoringCase; + import static org.hamcrest.Matchers.lessThanOrEqualTo; + + import static org.junit.Assert.assertThat; + import static org.junit.Assert.assertTrue; + import static org.junit.Assume.assumeThat; + import static org.junit.Assume.assumeNotNull; + import static org.junit.Assert.fail; + + import org.slf4j.Logger; + import org.slf4j.LoggerFactory; + + import org.apache.sentry.tests.e2e.hive.AbstractTestWithStaticConfiguration; + + /** + * A base class for HDFS sync-up tests. + * A single test can be run as follows: + * mvn test + -P cluster-hadoop-provider-db \ + -f pom.xml \ + -Dsentry.e2etest.admin.user=hive \ + -Dsentry.e2etest.admin.group=hive \ + -Dhive.server2.thrift.port=10000 \ + -Dhive.server2.authentication.kerberos.keytab=.. \ + -Dhive.server2.authentication.kerberos.principal=.. \ + -Dhive.server2.thrift.bind.host=${HS2_HOST} \ + -Dhive.server2.authentication=kerberos \ + -Dsentry.e2e.hive.keytabs.location=.. 
\ + -Dsentry.host=${SENTRY_HOST} \ + -Dsentry.service.security.mode=kerberos \ + -Dtest.hdfs.e2e.ext.path=/data + */ + + public abstract class TestDbHdfsBase extends AbstractTestWithStaticConfiguration { + private static final Logger LOGGER = LoggerFactory + .getLogger(TestDbHdfsBase.class); + + protected static String metastoreDir; + protected static String scratchLikeDir; + protected static String authenticationType; + protected static UserGroupInformation adminUgi; + protected static UserGroupInformation hiveUgi; + protected static int NUM_RETRIES_FOR_ACLS = 12; + protected static int WAIT_MS_FOR_ACLS = 1000; // milliseconds between retries + protected static String testExtPathDir = + System.getProperty("test.hdfs.e2e.ext.path"); + protected static final String KEYTAB_LOCATION = + System.getProperty("sentry.e2e.hive.keytabs.location", "/cdep/keytabs"); + protected static String DFS_TYPE = + System.getProperty("sentry.e2etest.DFSType", DFSType.MiniDFS.name()); + + protected final static String dfsAdmin = System.getProperty("dfs.cluster.administrators", "hdfs"); + + @BeforeClass + public static void setupTestStaticConfiguration() throws Exception { + useSentryService = true; + enableHDFSAcls = true; + AbstractTestWithStaticConfiguration.setupTestStaticConfiguration(); + AbstractTestWithStaticConfiguration.setupAdmin(); + scratchLikeDir = context.getProperty(HiveConf.ConfVars.SCRATCHDIR.varname); + metastoreDir = context.getProperty(HiveConf.ConfVars.METASTOREWAREHOUSE.varname); + authenticationType = System.getProperty(HiveConf.ConfVars.HIVE_SERVER2_AUTHENTICATION.varname); + assumeNotNull(metastoreDir, scratchLikeDir); + if (dfsType.equals(DFSType.ClusterDFS.name())) { + LOGGER.info("Start to run hdfs e2e tests on a real cluster."); + assumeNotNull(KEYTAB_LOCATION, authenticationType); + assumeThat(authenticationType, equalToIgnoringCase("kerberos")); + } else if (dfsType.equals(DFSType.MiniDFS.name())) { + LOGGER.info("Start to run hdfs e2e tests on a mini cluster."); + setupMiniCluster(); + } else { + LOGGER.error("Unknown DFS cluster type: expected MiniDFS or ClusterDFS"); + return; + } + // Since these are real e2e tests, for now they + // run against a real cluster managed outside of the tests + assumeThat(hiveServer2Type, equalTo(HiveServer2Type.UnmanagedHiveServer2)); + assumeThat(dfsType, equalTo(DFSType.ClusterDFS.name())); + } + + private static void setupMiniCluster() throws Exception { + createUgis(); + } + + @After + public void clearAfterPerTest() throws Exception { + super.clearAfterPerTest(); + // Clean up any extra data created during testing in the external path + LOGGER.info("TestDbHdfsBase clearAfterPerTest"); + kinitFromKeytabFile(dfsAdmin, getKeyTabFileFullPath(dfsAdmin)); + if (!Strings.isNullOrEmpty(testExtPathDir)) { + Path path = new Path(testExtPathDir); + FileStatus[] children = fileSystem.listStatus(path); + for (FileStatus fs : children) { + LOGGER.info("Deleting " + fs.toString()); + fileSystem.delete(fs.getPath(), true); + } + } + } + + private FileSystem getFS(UserGroupInformation ugi) throws Exception { + return ugi.doAs(new PrivilegedExceptionAction<FileSystem>() { + public FileSystem run() throws Exception { + Configuration conf = new Configuration(); + return FileSystem.get(conf); + } + }); + } + + private static void createUgis() throws Exception { + if (dfsType.equals(DFSType.MiniDFS.name())) { + adminUgi = UserGroupInformation.createUserForTesting( + System.getProperty("user.name"), new String[]{"supergroup"}); + hiveUgi = UserGroupInformation.createUserForTesting( + "hive", 
new String[]{"hive"}); + } else if (dfsType.equals(DFSType.ClusterDFS.name())) { + adminUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI("hdfs", KEYTAB_LOCATION + "/hdfs.keytab"); + hiveUgi = UserGroupInformation.loginUserFromKeytabAndReturnUGI("hive", KEYTAB_LOCATION + "/hive.keytab"); + } + } + + protected void verifyAclsRecursive(final List<AclEntry> expectedAcls, final String pathLoc, + final boolean recursive) throws Exception { + if (DFS_TYPE.equals(DFSType.MiniDFS.name())) { + fileSystem = getFS(adminUgi); + adminUgi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + verifyAclsHelper(expectedAcls, pathLoc, recursive); + return null; + } + }); + } else if (DFS_TYPE.equals(DFSType.ClusterDFS.name())) { + kinitFromKeytabFile(dfsAdmin, getKeyTabFileFullPath(dfsAdmin)); + verifyAclsHelper(expectedAcls, pathLoc, recursive); + } else { + fail("Unknown DFS cluster type: " + DFS_TYPE); + } + } + + protected void verifyNoAclRecursive(final List<AclEntry> noAcls, final String pathLoc, + final boolean recursive) throws Exception { + if (DFS_TYPE.equals(DFSType.MiniDFS.name())) { + fileSystem = getFS(adminUgi); + adminUgi.doAs(new PrivilegedExceptionAction<Void>() { + @Override + public Void run() throws Exception { + verifyNoAclHelper(noAcls, pathLoc, recursive); + return null; + } + }); + } else if (DFS_TYPE.equals(DFSType.ClusterDFS.name())) { + kinitFromKeytabFile(dfsAdmin, getKeyTabFileFullPath(dfsAdmin)); + verifyNoAclHelper(noAcls, pathLoc, recursive); + } else { + fail("Unknown DFS cluster type: " + DFS_TYPE); + } + } + + /** + * Verify extended acl entries are correctly synced up + * @param expectedAcls + * @param pathLoc + * @param recursive + * @throws Exception + */ + private void verifyAclsHelper(List<AclEntry> expectedAcls, String pathLoc, + boolean recursive) throws Exception { + int retry = 0; + Path path = new Path(pathLoc); + LOGGER.info("expectedAcls of [" + pathLoc + "] = " + expectedAcls.toString()); + // Syncing up acls takes some time so make validation in a loop + while (retry < NUM_RETRIES_FOR_ACLS) { + AclStatus aclStatus = fileSystem.getAclStatus(path); + List<AclEntry> actualAcls = new ArrayList<>(aclStatus.getEntries()); + LOGGER.info("[" + retry + "] actualAcls of [" + pathLoc + "] = " + actualAcls.toString()); + retry += 1; + if (!actualAcls.isEmpty() && !actualAcls.contains(expectedAcls.get(expectedAcls.size()-1))) { + Thread.sleep(WAIT_SECS_FOR_ACLS); + continue; + } + for (AclEntry expected : expectedAcls) { + assertTrue("Fail to find aclEntry: " + expected.toString(), + actualAcls.contains(expected)); + } + break; + } + assertThat(retry, lessThanOrEqualTo(NUM_RETRIES_FOR_ACLS)); + if (recursive && fileSystem.getFileStatus(path).isDirectory()) { + FileStatus[] children = fileSystem.listStatus(path); + for (FileStatus fs : children) { + verifyAclsRecursive(expectedAcls, fs.getPath().toString(), recursive); + } + } + } + + /** + * Verify there is no specified acls gotten synced up in the path status + * @param noAcls + * @param pathLoc + * @param recursive + * @throws Exception + */ + private void verifyNoAclHelper(List<AclEntry> noAcls, String pathLoc, + boolean recursive) throws Exception { + int retry = 0; + // Retry a couple of times in case the incorrect acls take time to be synced up + while (retry < NUM_RETRIES_FOR_ACLS) { + Path path = new Path(pathLoc); + AclStatus aclStatus = fileSystem.getAclStatus(path); + List<AclEntry> actualAcls = new ArrayList<>(aclStatus.getEntries()); + LOGGER.info("[" + 
retry + "] actualAcls of [" + pathLoc + "] = " + actualAcls.toString()); + Thread.sleep(1000); // wait for syncup + retry += 1; + for (AclEntry acl : actualAcls) { + if (noAcls.contains(acl)) { + fail("Path [ " + pathLoc + " ] should not contain " + acl.toString()); + } + } + } + Path path = new Path(pathLoc); + if (recursive && fileSystem.getFileStatus(path).isDirectory()) { + FileStatus[] children = fileSystem.listStatus(path); + for (FileStatus fs : children) { + verifyNoAclRecursive(noAcls, fs.getPath().toString(), recursive); + } + } + } + + /** + * Drop and create role, in case the previous + * tests leave same roles uncleaned up + * @param statement + * @param roleName + * @throws Exception + */ + protected void dropRecreateRole(Statement statement, String roleName) throws Exception { + try { + exec(statement, "DROP ROLE " + roleName); + } catch (Exception ex) { + //noop + LOGGER.info("Role " + roleName + " does not exist. But it's ok."); + } finally { + exec(statement, "CREATE ROLE " + roleName); + } + } + + /** + * Create an internal test database and table + * @param db + * @param tbl + * @throws Exception + */ + protected void dropRecreateDbTblRl(String db, String tbl) throws Exception { + dropRecreateDbTblRl(null, db, tbl); + } + + /** + * Create test database and table with location pointing to testPathLoc + * @param testPathLoc + * @param db + * @param tbl + * @throws Exception + */ + protected void dropRecreateDbTblRl(String testPathLoc, String db, String tbl) throws Exception { + Connection connection = context.createConnection(ADMIN1); + Statement statement = connection.createStatement(); + exec(statement, "DROP DATABASE IF EXISTS " + db + " CASCADE"); + if (testPathLoc != null ) { + exec(statement, "CREATE DATABASE " + db + " LOCATION \'" + testPathLoc + "\'"); + } else { + exec(statement, "CREATE DATABASE " + db); + } + exec(statement, "USE " + db); + exec(statement, "CREATE TABLE " + tbl + "(number INT, value STRING) PARTITIONED BY (par INT)"); + exec(statement, "INSERT INTO TABLE " + tbl + " PARTITION(par=1) VALUES (1, 'test1')"); + exec(statement, "SELECT * FROM " + tbl); + if (statement != null) { + statement.close(); + } + if (connection != null ) { + connection.close(); + } + } + + protected static void kinitFromKeytabFile (String user, String keyTabFile) throws IOException { + Configuration conf = new Configuration(); + conf.set("hadoop.security.authentication", authenticationType); + UserGroupInformation.setConfiguration(conf); + UserGroupInformation.loginUserFromKeytab(user, keyTabFile); + } + + protected static String getKeyTabFileFullPath(String user) { + return KEYTAB_LOCATION + "/" + user + ".keytab"; + } +} + http://git-wip-us.apache.org/repos/asf/sentry/blob/d4165e42/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsExtMaxGroups.java ---------------------------------------------------------------------- diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsExtMaxGroups.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsExtMaxGroups.java new file mode 100644 index 0000000..5784d85 --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsExtMaxGroups.java @@ -0,0 +1,102 @@ +package org.apache.sentry.tests.e2e.hdfs; + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + import com.google.common.base.Strings; + + import java.sql.Connection; + import java.sql.Statement; + import java.util.ArrayList; + import java.util.List; + + import org.apache.hadoop.fs.Path; + import org.apache.hadoop.fs.permission.AclEntry; + + import static org.junit.Assume.assumeThat; + import static org.hamcrest.Matchers.not; + + import org.apache.sentry.tests.e2e.hive.PrivilegeResultSet; + import org.junit.Test; + + import org.slf4j.Logger; + import org.slf4j.LoggerFactory; + + /** + * Sentry-583 part 2: + * Add boundary condition test coverage around the max number of groups to the HDFS + * synchronization test suite, with test paths under a pre-defined external path + * (instead of the internal Hive warehouse dir). Normally, HDFS ACLs have a limit of 32 + * entries per object (HDFS-5617), but this limit should not be enforced when using + * Sentry HDFS synchronization. + */ + public class TestDbHdfsExtMaxGroups extends TestDbHdfsMaxGroups { + private static final Logger LOGGER = LoggerFactory + .getLogger(TestDbHdfsExtMaxGroups.class); + + /** + * Test that db and tbl level ACLs are synced up to the db, tbl and par paths + * The path is pre-configured in "sentry.hdfs.integration.path.prefixes" + * @throws Exception + */ + @Test + public void testExtMaxAclsWithGroups() throws Exception { + final String TEST_DB = "test_hdfs_max_group_ext_db"; + assumeThat(Strings.isNullOrEmpty(testExtPathDir), not(true)); + String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(testExtPathDir)) + "/" + TEST_DB; + LOGGER.info("extDbDir = " + extDbDir); + Path extDbPath = new Path(extDbDir); + kinitFromKeytabFile(dfsAdmin, getKeyTabFileFullPath(dfsAdmin)); + if (fileSystem.exists(extDbPath)) { + LOGGER.info("Deleting " + extDbDir); + fileSystem.delete(extDbPath, true); + } + dropRecreateDbTblRl(extDbDir, TEST_DB, TEST_TBL); + testMaxGroupsDbTblHelper(extDbDir, TEST_DB); + } + + /** + * A negative test case where the path is not in the prefix list. 
+ * In this case, ACLs should not be applied to the db, tbl and par paths + * @throws Exception + */ + @Test + public void testPathNotInPrefix() throws Exception { + final String TEST_DB = "test_hdfs_max_group_bad_db"; + String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(scratchLikeDir)) + "/" + TEST_DB; + LOGGER.info("extDbDir = " + extDbDir); + Path extDbPath = new Path(extDbDir); + kinitFromKeytabFile(dfsAdmin, getKeyTabFileFullPath(dfsAdmin)); + if (fileSystem.exists(extDbPath)) { + fileSystem.delete(extDbPath, true); + } + dropRecreateDbTblRl(extDbDir, TEST_DB, TEST_TBL); + Connection connection = context.createConnection(ADMIN1); + Statement statement = connection.createStatement(); + exec(statement, "USE " + TEST_DB); + dropRecreateRole(statement, TEST_ROLE1); + String dbgrp = "dbgrp"; + exec(statement, "GRANT ALL ON DATABASE " + TEST_DB + " TO ROLE " + TEST_ROLE1); + exec(statement, "GRANT ROLE " + TEST_ROLE1 + " TO GROUP " + dbgrp); + + context.close(); + + List<AclEntry> acls = new ArrayList<>(); + acls.add(AclEntry.parseAclEntry("group:" + dbgrp + ":rwx", true)); + verifyNoAclRecursive(acls, extDbDir, true); + } +} http://git-wip-us.apache.org/repos/asf/sentry/blob/d4165e42/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsMaxGroups.java ---------------------------------------------------------------------- diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsMaxGroups.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsMaxGroups.java new file mode 100644 index 0000000..623ed5d --- /dev/null +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hdfs/TestDbHdfsMaxGroups.java @@ -0,0 +1,197 @@ +package org.apache.sentry.tests.e2e.hdfs; + +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + + import com.google.common.base.Strings; + + import java.sql.Connection; + import java.sql.Statement; + import java.util.ArrayList; + import java.util.List; + + import org.apache.hadoop.fs.Path; + import org.apache.hadoop.fs.permission.AclEntry; + + import static org.junit.Assume.assumeThat; + import static org.hamcrest.Matchers.not; + + import org.apache.sentry.tests.e2e.hive.PrivilegeResultSet; + import static org.junit.Assert.assertTrue; + import org.junit.Test; + + import org.slf4j.Logger; + import org.slf4j.LoggerFactory; + + /** + * Sentry-583: + * Add boundary condition test coverage to the HDFS synchronization test suite around the max number of groups; + * Normally, HDFS ACLs have a limit of 32 entries per object (HDFS-5617), but this limit should + * not be enforced when using Sentry HDFS synchronization. 
+ */ + public class TestDbHdfsMaxGroups extends TestDbHdfsBase { + private static final Logger LOGGER = LoggerFactory + .getLogger(TestDbHdfsMaxGroups.class); + private static final int MAX_NUM_OF_GROUPS = 33; + protected static final String TEST_TBL = "tbl"; + protected static final String TEST_ROLE1 = "test_hdfs_max_group_role1"; + protected static final String TEST_ROLE2 = "test_hdfs_max_group_role2"; + protected static final String TEST_ROLE3 = "test_hdfs_max_group_role3"; + + /** + * Test that db and tbl level ACLs are synced up to the db, tbl and par paths + * @throws Exception + */ + @Test + public void testIntDbTblMaxAclsWithGroups() throws Exception { + final String TEST_DB = "test_hdfs_max_group_int_db"; + String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(metastoreDir)) + "/" + TEST_DB + ".db"; + LOGGER.info("extDbDir = " + extDbDir); + dropRecreateDbTblRl(TEST_DB, TEST_TBL); + testMaxGroupsDbTblHelper(extDbDir, TEST_DB); + } + + /** + * Test that col level ACLs do not sync up to the db, tbl and par paths + * @throws Exception + */ + @Test + public void testIntColMaxAclsWithGroups() throws Exception { + final String TEST_DB = "test_hdfs_max_group_int_col_db"; + String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(metastoreDir)) + "/" + TEST_DB + ".db"; + LOGGER.info("extDbDir = " + extDbDir); + dropRecreateDbTblRl(TEST_DB, TEST_TBL); + testMaxGroupsColHelper(extDbDir, TEST_DB); + } + + /** + * Test that db and tbl level ACLs are synced up to the db, tbl and par paths + * The path is pre-configured in "sentry.hdfs.integration.path.prefixes" + * @throws Exception + */ + @Test + public void testExtMaxAclsWithGroups() throws Exception { + final String TEST_DB = "test_hdfs_max_group_ext_db"; + assumeThat(Strings.isNullOrEmpty(testExtPathDir), not(true)); + String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(testExtPathDir)) + "/" + TEST_DB; + LOGGER.info("extDbDir = " + extDbDir); + Path extDbPath = new Path(extDbDir); + kinitFromKeytabFile(dfsAdmin, getKeyTabFileFullPath(dfsAdmin)); + if (fileSystem.exists(extDbPath)) { + LOGGER.info("Deleting " + extDbDir); + fileSystem.delete(extDbPath, true); + } + dropRecreateDbTblRl(extDbDir, TEST_DB, TEST_TBL); + testMaxGroupsDbTblHelper(extDbDir, TEST_DB); + } + + /** + * A negative test case where the path is not in the prefix list. 
+ * In this case, ACLs should not be applied to the db, tbl and par paths + * @throws Exception + */ + @Test + public void testPathNotInPrefix() throws Exception { + final String TEST_DB = "test_hdfs_max_group_bad_db"; + String extDbDir = Path.getPathWithoutSchemeAndAuthority(new Path(scratchLikeDir)) + "/" + TEST_DB; + LOGGER.info("extDbDir = " + extDbDir); + Path extDbPath = new Path(extDbDir); + kinitFromKeytabFile(dfsAdmin, getKeyTabFileFullPath(dfsAdmin)); + if (fileSystem.exists(extDbPath)) { + fileSystem.delete(extDbPath, true); + } + dropRecreateDbTblRl(extDbDir, TEST_DB, TEST_TBL); + Connection connection = context.createConnection(ADMIN1); + Statement statement = connection.createStatement(); + exec(statement, "USE " + TEST_DB); + dropRecreateRole(statement, TEST_ROLE1); + String dbgrp = "dbgrp"; + exec(statement, "GRANT ALL ON DATABASE " + TEST_DB + " TO ROLE " + TEST_ROLE1); + exec(statement, "GRANT ROLE " + TEST_ROLE1 + " TO GROUP " + dbgrp); + + context.close(); + + List<AclEntry> acls = new ArrayList<>(); + acls.add(AclEntry.parseAclEntry("group:" + dbgrp + ":rwx", true)); + verifyNoAclRecursive(acls, extDbDir, true); + } + + protected void testMaxGroupsDbTblHelper(String extDbDir, String db) throws Exception { + String tblPathLoc = extDbDir + "/" + TEST_TBL; + String colPathLoc = tblPathLoc + "/par=1"; + LOGGER.info("tblPathLoc = " + tblPathLoc); + LOGGER.info("colPathLoc = " + colPathLoc); + Connection connection = context.createConnection(ADMIN1); + Statement statement = connection.createStatement(); + exec(statement, "USE " + db); + dropRecreateRole(statement, TEST_ROLE1); + dropRecreateRole(statement, TEST_ROLE2); + exec(statement, "GRANT ALL ON DATABASE " + db + " TO ROLE " + TEST_ROLE1); + exec(statement, "GRANT INSERT ON TABLE " + TEST_TBL + " TO ROLE " + TEST_ROLE2); + + List<AclEntry> dbacls = new ArrayList<>(); + List<AclEntry> tblacls = new ArrayList<>(); + for (int i = 0; i < MAX_NUM_OF_GROUPS; i++) { + String dbgrp = "dbgrp" + String.valueOf(i); + String tblgrp = "tblgrp" + String.valueOf(i); + dbacls.add(AclEntry.parseAclEntry("group:" + dbgrp + ":rwx", true)); + tblacls.add(AclEntry.parseAclEntry("group:" + tblgrp + ":-wx", true)); + exec(statement, "GRANT ROLE " + TEST_ROLE1 + " TO GROUP " + dbgrp); + exec(statement, "GRANT ROLE " + TEST_ROLE2 + " TO GROUP " + tblgrp); + } + context.close(); + + // db level privileges should sync up ACLs to the db, tbl and par paths + verifyAclsRecursive(dbacls, extDbDir, true); + // tbl level privileges should sync up ACLs to the tbl and par paths + verifyAclsRecursive(tblacls, tblPathLoc, true); + // tbl level privileges should not sync up ACLs to the db path + verifyNoAclRecursive(tblacls, extDbDir, false); + } + + protected void testMaxGroupsColHelper(String extDbDir, String db) throws Exception { + String tblPathLoc = extDbDir + "/" + TEST_TBL; + String colPathLoc = tblPathLoc + "/par=1"; + LOGGER.info("tblPathLoc = " + tblPathLoc); + LOGGER.info("colPathLoc = " + colPathLoc); + Connection connection = context.createConnection(ADMIN1); + Statement statement = connection.createStatement(); + exec(statement, "USE " + db); + dropRecreateRole(statement, TEST_ROLE3); + exec(statement, "GRANT SELECT(value) ON TABLE " + TEST_TBL + " TO ROLE " + TEST_ROLE3); + + List<AclEntry> colacls = new ArrayList<>(); + for (int i = 0; i < MAX_NUM_OF_GROUPS; i++) { + String colgrp = "colgrp" + String.valueOf(i); + colacls.add(AclEntry.parseAclEntry("group:" + colgrp + ":r-x", true)); + exec(statement, "GRANT ROLE " + TEST_ROLE3 + " TO GROUP " + colgrp); 
+ } + + PrivilegeResultSet pRset = new PrivilegeResultSet(statement, "SHOW GRANT ROLE " + TEST_ROLE3); + LOGGER.info(TEST_ROLE3 + " privileges = " + pRset.toString()); + assertTrue(pRset.verifyResultSetColumn("database", db)); + assertTrue(pRset.verifyResultSetColumn("table", TEST_TBL)); + assertTrue(pRset.verifyResultSetColumn("column", "value")); + assertTrue(pRset.verifyResultSetColumn("privilege", "select")); + assertTrue(pRset.verifyResultSetColumn("principal_name", TEST_ROLE3)); + + context.close(); + + // column level privileges should not sync up ACLs to any db, tbl or par paths + verifyNoAclRecursive(colacls, extDbDir, true); + } +} http://git-wip-us.apache.org/repos/asf/sentry/blob/d4165e42/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java ---------------------------------------------------------------------- diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java index d2a1d36..0e4b3ca 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/AbstractTestWithStaticConfiguration.java @@ -187,6 +187,9 @@ public abstract class AbstractTestWithStaticConfiguration { protected static SentryPolicyServiceClient client; private static boolean startSentry = new Boolean(System.getProperty(EXTERNAL_SENTRY_SERVICE, "false")); + protected static boolean enableHDFSAcls = false; + protected static String dfsType; + /** * Get sentry client with authenticated Subject * (its security-related attributes(for example, kerberos principal and key) @@ -281,8 +284,8 @@ public abstract class AbstractTestWithStaticConfiguration { dataDir = assertCreateDir(new File(baseDir, "data")); policyFileLocation = new File(confDir, HiveServerFactory.AUTHZ_PROVIDER_FILENAME); - String dfsType = System.getProperty(DFSFactory.FS_TYPE); - dfs = DFSFactory.create(dfsType, baseDir, testServerType); + dfsType = System.getProperty(DFSFactory.FS_TYPE, DFSFactory.DFSType.MiniDFS.toString()); + dfs = DFSFactory.create(dfsType, baseDir, testServerType, enableHDFSAcls); fileSystem = dfs.getFileSystem(); PolicyFile policyFile = PolicyFile.setAdminOnServer1(ADMIN1) @@ -292,7 +295,7 @@ public abstract class AbstractTestWithStaticConfiguration { String policyURI; if (policyOnHdfs) { String dfsUri = FileSystem.getDefaultUri(fileSystem.getConf()).toString(); - LOGGER.error("dfsUri " + dfsUri); + LOGGER.info("dfsUri " + dfsUri); policyURI = dfsUri + System.getProperty("sentry.e2etest.hive.policy.location", "/user/hive/sentry"); policyURI += "/" + HiveServerFactory.AUTHZ_PROVIDER_FILENAME; @@ -315,6 +318,7 @@ public abstract class AbstractTestWithStaticConfiguration { "org.apache.hadoop.hive.ql.lockmgr.EmbeddedLockManager"); } + HiveConf hiveConf = new HiveConf(); hiveServer = create(properties, baseDir, confDir, logDir, policyURI, fileSystem); hiveServer.start(); createContext(); http://git-wip-us.apache.org/repos/asf/sentry/blob/d4165e42/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/PrivilegeResultSet.java ---------------------------------------------------------------------- diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/PrivilegeResultSet.java 
b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/PrivilegeResultSet.java index 8818c4c..3e73cc6 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/PrivilegeResultSet.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/PrivilegeResultSet.java @@ -17,6 +17,7 @@ package org.apache.sentry.tests.e2e.hive; +import org.fest.util.Strings; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -89,7 +90,11 @@ public class PrivilegeResultSet { for (int i = 0; i < this.colNum; i ++) { if (this.header.get(i).equalsIgnoreCase(colName)) { for (int j = 0; j < this.privilegeResultSet.size(); j ++) { - if (this.privilegeResultSet.get(j).get(i).equalsIgnoreCase(colVal)) { + String val = this.privilegeResultSet.get(j).get(i); + if (Strings.isNullOrEmpty(colVal)) { + return Strings.isNullOrEmpty(val); + } + if (val.equalsIgnoreCase(colVal)) { LOGGER.info("Found " + colName + " contains a value = " + colVal); return true; } http://git-wip-us.apache.org/repos/asf/sentry/blob/d4165e42/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFSFactory.java ---------------------------------------------------------------------- diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFSFactory.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFSFactory.java index e1881b4..7f650ce 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFSFactory.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/DFSFactory.java @@ -24,7 +24,7 @@ public class DFSFactory { public static final String FS_TYPE = "sentry.e2etest.DFSType"; public static DFS create(String dfsType, File baseDir, - String serverType) throws Exception { + String serverType, boolean enableHDFSAcls) throws Exception { DFSType type; if(dfsType!=null) { type = DFSType.valueOf(dfsType.trim()); @@ -33,7 +33,7 @@ public class DFSFactory { } switch (type) { case MiniDFS: - return new MiniDFS(baseDir, serverType); + return new MiniDFS(baseDir, serverType, enableHDFSAcls); case ClusterDFS: return new ClusterDFS(); default: @@ -41,6 +41,11 @@ public class DFSFactory { } } + public static DFS create(String dfsType, File baseDir, + String serverType) throws Exception { + return create(dfsType, baseDir, serverType, false); + } + @VisibleForTesting public static enum DFSType { MiniDFS, http://git-wip-us.apache.org/repos/asf/sentry/blob/d4165e42/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java ---------------------------------------------------------------------- diff --git a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java index 77af432..970ed8d 100644 --- a/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java +++ b/sentry-tests/sentry-tests-hive/src/test/java/org/apache/sentry/tests/e2e/hive/fs/MiniDFS.java @@ -54,7 +54,8 @@ public class MiniDFS extends AbstractDFS { private static MiniDFSCluster dfsCluster; - MiniDFS(File baseDir, String serverType) throws Exception { + private void createMiniDFSCluster(File baseDir, String serverType, + boolean enableHDFSAcls) throws Exception { Configuration conf = new Configuration(); if 
(HiveServer2Type.InternalMetastore.name().equalsIgnoreCase(serverType)) { // set the test group mapping that maps user to a group of same name @@ -68,6 +69,9 @@ public class MiniDFS extends AbstractDFS { conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, dfsDir.getPath()); conf.set("hadoop.security.group.mapping", MiniDFS.PseudoGroupMappingService.class.getName()); + if (enableHDFSAcls) { + conf.set("dfs.namenode.acls.enabled", "true"); + } Configuration.addDefaultResource("test.xml"); dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build(); fileSystem = dfsCluster.getFileSystem(); @@ -76,6 +80,14 @@ public class MiniDFS extends AbstractDFS { dfsBaseDir = assertCreateDfsDir(new Path(new Path(fileSystem.getUri()), "/base")); } + MiniDFS(File baseDir, String serverType) throws Exception { + createMiniDFSCluster(baseDir, serverType, false); + } + + MiniDFS(File baseDir, String serverType, boolean enableHDFSAcls) throws Exception { + createMiniDFSCluster(baseDir, serverType, enableHDFSAcls); + } + @Override public void tearDown() throws Exception { if(dfsCluster != null) {
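
For context on the boundary condition these tests exercise, here is a minimal standalone sketch (not part of this commit; the class name MaxGroupsAclSketch is hypothetical) of how the test helpers build their expected 33-entry ACL lists via AclEntry.parseAclEntry(). Plain HDFS caps an inode at 32 ACL entries (HDFS-5617), so a direct FileSystem.setAcl() call with such a list would be rejected; the tests instead expect getAclStatus() to eventually report every entry once Sentry HDFS synchronization has propagated the role grants.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.fs.permission.AclEntry;

public class MaxGroupsAclSketch {
  public static void main(String[] args) {
    // One more group than the 32-entry per-inode ACL limit plain HDFS enforces.
    final int maxNumOfGroups = 33;
    List<AclEntry> dbAcls = new ArrayList<>();
    for (int i = 0; i < maxNumOfGroups; i++) {
      // Same "group:<name>:rwx" spec string the tests use;
      // includePermission=true because the spec carries the rwx bits.
      dbAcls.add(AclEntry.parseAclEntry("group:dbgrp" + i + ":rwx", true));
    }
    // The tests assert that fileSystem.getAclStatus(path).getEntries()
    // eventually contains all of these entries, i.e. the 32-entry limit
    // is not enforced on paths managed by Sentry HDFS synchronization.
    System.out.println(dbAcls.size() + " ACL entries: " + dbAcls);
  }
}

Only hadoop-common is needed on the classpath to run the sketch.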
