This is an automated email from the ASF dual-hosted git repository.
liuxun pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/gravitino.git
The following commit(s) were added to refs/heads/main by this push:
new a307d904cf [#6917] test(authz): Add more test cases about Hdfs Ranger
authz plugin (#7001)
a307d904cf is described below
commit a307d904cfd31bdd86d927bec3d97ded358875bb
Author: roryqi <[email protected]>
AuthorDate: Wed Apr 23 16:08:48 2025 +0800
[#6917] test(authz): Add more test cases about Hdfs Ranger authz plugin
(#7001)
### What changes were proposed in this pull request?
Add more test cases about Hdfs Ranger authz plugin
### Why are the changes needed?
Fix: #6917
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
Add more ITs.
---
.../test/TestChainedAuthorizationIT.java | 4 +-
.../ranger/RangerAuthorizationHDFSPlugin.java | 63 ++-
.../ranger/RangerAuthorizationPlugin.java | 4 +-
.../test/RangerAuthorizationHDFSPluginIT.java | 4 +-
.../ranger/integration/test/RangerBaseE2EIT.java | 431 +++++++++++++++------
.../ranger/integration/test/RangerFilesetIT.java | 37 +-
.../ranger/integration/test/RangerHiveE2EIT.java | 2 +-
.../integration/test/RangerHiveHdfsE2EIT.java | 358 ++++++++++++++---
.../integration/test/RangerIcebergE2EIT.java | 2 +-
.../ranger/integration/test/RangerPaimonE2EIT.java | 2 +-
10 files changed, 725 insertions(+), 182 deletions(-)
diff --git
a/authorizations/authorization-chain/src/test/java/org/apache/gravitino/authorization/chain/integration/test/TestChainedAuthorizationIT.java
b/authorizations/authorization-chain/src/test/java/org/apache/gravitino/authorization/chain/integration/test/TestChainedAuthorizationIT.java
index bb65e94a3b..582a4794ec 100644
---
a/authorizations/authorization-chain/src/test/java/org/apache/gravitino/authorization/chain/integration/test/TestChainedAuthorizationIT.java
+++
b/authorizations/authorization-chain/src/test/java/org/apache/gravitino/authorization/chain/integration/test/TestChainedAuthorizationIT.java
@@ -247,7 +247,7 @@ public class TestChainedAuthorizationIT extends
RangerBaseE2EIT {
private void doTestCreateSchema(String roleName, SecurableObject
securableObject)
throws IOException {
// Choose a catalog
- useCatalog();
+ reset();
// First, fail to create the schema
Exception accessControlException =
@@ -426,7 +426,7 @@ public class TestChainedAuthorizationIT extends
RangerBaseE2EIT {
}
@Override
- protected void useCatalog() {
+ protected void reset() {
// TODO
}
diff --git
a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java
b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java
index 0d6d2decb3..0d11c725f6 100644
---
a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java
+++
b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java
@@ -24,6 +24,7 @@ import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
import java.util.ArrayList;
+import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
@@ -31,6 +32,8 @@ import java.util.Map;
import java.util.Objects;
import java.util.Set;
import java.util.regex.Pattern;
+import java.util.stream.Collectors;
+import org.apache.commons.lang.StringUtils;
import org.apache.gravitino.Entity;
import org.apache.gravitino.GravitinoEnv;
import org.apache.gravitino.MetadataObject;
@@ -202,8 +205,6 @@ public class RangerAuthorizationHDFSPlugin extends
RangerAuthorizationPlugin {
MetadataObject.Type operationType,
AuthorizationMetadataObject oldAuthzMetaObject,
AuthorizationMetadataObject newAuthzMetaObject) {
- PathBasedMetadataObject newPathBasedMetadataObject =
- (PathBasedMetadataObject) newAuthzMetaObject;
List<RangerPolicy> oldPolicies = wildcardSearchPolies(oldAuthzMetaObject);
List<RangerPolicy> existNewPolicies =
wildcardSearchPolies(newAuthzMetaObject);
if (oldPolicies.isEmpty()) {
@@ -217,14 +218,48 @@ public class RangerAuthorizationHDFSPlugin extends
RangerAuthorizationPlugin {
policy -> {
try {
// Update the policy name is following Gravitino's spec
- policy.setName(getAuthorizationPath(newPathBasedMetadataObject));
+ // Only Hive managed table rename will use this case
+ String oldResource =
+ policy
+ .getResources()
+ .get(rangerHelper.policyResourceDefines.get(0))
+ .getValues()
+ .get(0);
+ List<String> oldResourceNames =
+ Arrays.stream(oldResource.split("/"))
+ .filter(path -> StringUtils.isNotBlank(path) &&
!".".equals(path))
+ .collect(Collectors.toList());
+ List<String> newResourceNames =
+ Arrays.stream(
+ getAuthorizationPath((PathBasedMetadataObject)
newAuthzMetaObject)
+ .split("/"))
+ .filter(path -> StringUtils.isNotBlank(path) &&
!".".equals(path))
+ .collect(Collectors.toList());
+
+ int minLen = Math.min(oldResourceNames.size(),
newResourceNames.size());
+ for (int i = 0; i < minLen; i++) {
+ String oldName = oldResourceNames.get(i);
+ String newName = newResourceNames.get(i);
+ if (!oldName.equals(newName)) {
+ if (oldName.equals(oldAuthzMetaObject.name())
+ && newName.equals(newAuthzMetaObject.name())) {
+ oldResourceNames.set(i, newAuthzMetaObject.name());
+ break;
+ } else {
+ // If resource doesn't match, ignore this resource
+ return;
+ }
+ }
+ }
+ String newResourcePath = "/" + String.join("/", oldResourceNames);
+
+ policy.setName(newResourcePath);
// Update the policy resource name to new name
policy
.getResources()
.put(
rangerHelper.policyResourceDefines.get(0),
- new RangerPolicy.RangerPolicyResource(
- getAuthorizationPath(newPathBasedMetadataObject)));
+ new RangerPolicy.RangerPolicyResource(newResourcePath));
boolean alreadyExist =
existNewPolicies.stream()
@@ -527,13 +562,21 @@ public class RangerAuthorizationHDFSPlugin extends
RangerAuthorizationPlugin {
locations.forEach(
locationPath -> {
+ Entity.EntityType pathEntityType;
+ MetadataObject.Type pathObjectType;
+ if (type == Entity.EntityType.METALAKE) {
+ pathEntityType = Entity.EntityType.CATALOG;
+ pathObjectType = MetadataObject.Type.CATALOG;
+ } else {
+ pathEntityType = type;
+ pathObjectType = securableObject.type();
+ }
PathBasedMetadataObject pathBaseMetadataObject =
new PathBasedMetadataObject(
securableObject.parent(),
securableObject.name(),
- pathExtractor.getPath(
- MetadataObjectUtil.toEntityType(securableObject),
locationPath),
- PathBasedMetadataObject.PathType.get(securableObject.type()),
+ pathExtractor.getPath(pathEntityType, locationPath),
+ PathBasedMetadataObject.PathType.get(pathObjectType),
recursive);
pathBaseMetadataObject.validateAuthorizationMetadataObject();
rangerSecurableObjects.add(
@@ -811,9 +854,9 @@ public class RangerAuthorizationHDFSPlugin extends
RangerAuthorizationPlugin {
@Override
public String getPath(Entity.EntityType type, String location) {
if (type == Entity.EntityType.CATALOG) {
- return String.format("%s/*/*/", location);
+ return String.format("%s/*/*", location);
} else if (type == Entity.EntityType.SCHEMA) {
- return String.format("%s/*/", location);
+ return String.format("%s/*", location);
}
if (type == Entity.EntityType.TABLE || type ==
Entity.EntityType.FILESET) {
return location;
diff --git
a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
index 78a9a25596..cb42db950a 100644
---
a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
+++
b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
@@ -474,9 +474,9 @@ public abstract class RangerAuthorizationPlugin
.build();
if (preOwner != null) {
if (preOwner.type() == Owner.Type.USER) {
- preOwnerUserName = newOwner.name();
+ preOwnerUserName = preOwner.name();
} else {
- preOwnerGroupName = newOwner.name();
+ preOwnerGroupName = preOwner.name();
}
}
if (newOwner.type() == Owner.Type.USER) {
diff --git
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationHDFSPluginIT.java
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationHDFSPluginIT.java
index dae2ebadc3..2ec989a23e 100644
---
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationHDFSPluginIT.java
+++
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationHDFSPluginIT.java
@@ -194,7 +194,7 @@ public class RangerAuthorizationHDFSPluginIT {
authorizationSecurableObject -> {
PathBasedMetadataObject pathBasedMetadataObject =
((PathBasedMetadataObject)
authorizationSecurableObject);
- return
pathBasedMetadataObject.path().equals("/test/*/*/")
+ return
pathBasedMetadataObject.path().equals("/test/*/*")
&& pathBasedMetadataObject.recursive();
})
.count());
@@ -234,7 +234,7 @@ public class RangerAuthorizationHDFSPluginIT {
authorizationSecurableObject -> {
PathBasedMetadataObject pathBasedMetadataObject =
((PathBasedMetadataObject)
authorizationSecurableObject);
- return
pathBasedMetadataObject.path().equals("/test/*/")
+ return
pathBasedMetadataObject.path().equals("/test/*")
&& pathBasedMetadataObject.recursive();
})
.count());
diff --git
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java
index 94b831d4ac..f75625f518 100644
---
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java
+++
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerBaseE2EIT.java
@@ -139,10 +139,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
if (client != null) {
Arrays.stream(catalog.asSchemas().listSchemas())
.filter(schema -> !schema.equals("default"))
- .forEach(
- (schema -> {
- catalog.asSchemas().dropSchema(schema, false);
- }));
+ .forEach((schema -> catalog.asSchemas().dropSchema(schema, false)));
// The `dropCatalog` call will invoke the catalog metadata object to
remove privileges
Arrays.stream(metalake.listCatalogs())
@@ -203,12 +200,16 @@ public abstract class RangerBaseE2EIT extends BaseIT {
protected abstract void checkDeleteSQLWithModifyPrivileges();
- protected abstract void useCatalog();
+ protected abstract void reset();
protected abstract void checkWithoutPrivileges();
protected abstract void testAlterTable();
+ protected void testDropTable() {
+ Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_TABLE));
+ }
+
// ISSUE-5947: can't rename a catalog or a metalake
@Test
protected void testRenameMetalakeOrCatalog() {
@@ -224,25 +225,14 @@ public abstract class RangerBaseE2EIT extends BaseIT {
}
@Test
- protected void testCreateSchema() throws InterruptedException, IOException {
+ protected void testCreateSchema() throws InterruptedException {
// Choose a catalog
- useCatalog();
+ reset();
+
+ TestCreateSchemaChecker createSchemaChecker = getTestCreateSchemaChecker();
// First, fail to create the schema
- Assertions.assertThrows(Exception.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
- Exception accessControlException =
- Assertions.assertThrows(Exception.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
- Assertions.assertTrue(
- accessControlException
- .getMessage()
- .contains(
- String.format(
- "Permission denied: user [%s] does not have [create]
privilege",
- testUserName()))
- || accessControlException
- .getMessage()
- .contains(
- String.format("Permission denied: user=%s, access=WRITE",
testUserName())));
+ createSchemaChecker.checkCreateSchemaWithoutPriv();
// Second, grant the `CREATE_SCHEMA` role
String roleName = currentFunName();
@@ -256,18 +246,35 @@ public abstract class RangerBaseE2EIT extends BaseIT {
// Third, succeed to create the schema
sparkSession.sql(SQL_CREATE_SCHEMA);
- // Fourth, fail to create the table
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_TABLE));
+ // Fourth, test to create the table
+ createSchemaChecker.checkCreateTable();
// Clean up
catalog.asSchemas().dropSchema(schemaName, false);
metalake.deleteRole(roleName);
}
+ protected TestCreateSchemaChecker getTestCreateSchemaChecker() {
+ return new TestCreateSchemaChecker();
+ }
+
+ protected static class TestCreateSchemaChecker {
+ void checkCreateSchemaWithoutPriv() {
+ Assertions.assertThrows(Exception.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ }
+
+ void checkCreateTable() {
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_TABLE));
+ }
+ }
+
@Test
- void testCreateTable() throws InterruptedException {
+ void testCreateTable() {
// Choose a catalog
- useCatalog();
+ reset();
+
+ TestCreateTableChecker checker = getTestCreateTableChecker();
// First, create a role for creating a database and grant role to the user
String createSchemaRole = currentFunName();
@@ -285,7 +292,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
// Third, fail to create a table
sparkSession.sql(SQL_USE_SCHEMA);
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_TABLE));
+ checker.checkCreateTable();
// Fourth, create a role for creating a table and grant to the user
String createTableRole = currentFunName() + "2";
@@ -301,9 +308,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
sparkSession.sql(SQL_CREATE_TABLE);
// Sixth, fail to read and write a table
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_INSERT_TABLE));
- Assertions.assertThrows(
- AccessControlException.class, () ->
sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
+ checker.checkTableReadWrite();
// Clean up
catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName,
tableName));
@@ -312,10 +317,30 @@ public abstract class RangerBaseE2EIT extends BaseIT {
metalake.deleteRole(createSchemaRole);
}
+ protected TestCreateTableChecker getTestCreateTableChecker() {
+ return new TestCreateTableChecker();
+ }
+
+ protected static class TestCreateTableChecker {
+ void checkTableReadWrite() {
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_INSERT_TABLE));
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
+ }
+
+ void checkCreateTable() {
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_TABLE));
+ }
+ }
+
@Test
- void testSelectModifyTableWithMetalakeLevelRole() throws
InterruptedException {
+ void testSelectModifyTableWithMetalakeLevelRole() {
// Choose a catalog
- useCatalog();
+ reset();
+
+ TestSelectModifyTableChecker checker = getTestSelectModifyTableChecker();
// First, create a role for creating a database and grant role to the user
String readWriteRole = currentFunName();
@@ -346,31 +371,61 @@ public abstract class RangerBaseE2EIT extends BaseIT {
sparkSession.sql(SQL_SELECT_TABLE).collectAsList();
// case 3: Update data in the table
- checkUpdateSQLWithSelectModifyPrivileges();
+ checker.checkUpdateSQL();
// case 4: Delete data from the table.
- checkDeleteSQLWithSelectModifyPrivileges();
+ checker.checkDeleteSQL();
// case 5: Succeed to alter the table
testAlterTable();
// case 6: Fail to drop the table
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_TABLE));
+ testDropTable();
// case 7: If we don't have the role, we can't insert and select from data.
metalake.deleteRole(readWriteRole);
waitForUpdatingPolicies();
- checkWithoutPrivileges();
+ checker.checkNoPrivSQL();
// Clean up
catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName,
tableName));
catalog.asSchemas().dropSchema(schemaName, false);
+ waitForUpdatingPolicies();
+ }
+
+ TestSelectModifyTableChecker getTestSelectModifyTableChecker() {
+ return new TestSelectModifyTableChecker() {
+ @Override
+ void checkUpdateSQL() {
+ checkUpdateSQLWithSelectModifyPrivileges();
+ }
+
+ @Override
+ void checkDeleteSQL() {
+ checkDeleteSQLWithSelectModifyPrivileges();
+ }
+
+ @Override
+ void checkNoPrivSQL() {
+ checkWithoutPrivileges();
+ }
+ };
+ }
+
+ protected abstract static class TestSelectModifyTableChecker {
+ abstract void checkUpdateSQL();
+
+ abstract void checkDeleteSQL();
+
+ abstract void checkNoPrivSQL();
}
@Test
- void testSelectModifyTableWithTableLevelRole() throws InterruptedException {
+ void testSelectModifyTableWithTableLevelRole() {
// Choose a catalog
- useCatalog();
+ reset();
+
+ TestSelectModifyTableChecker checker = getTestSelectModifyTableChecker();
// First, create a role for creating a database and grant role to the user
String roleName = currentFunName();
@@ -410,21 +465,21 @@ public abstract class RangerBaseE2EIT extends BaseIT {
sparkSession.sql(SQL_SELECT_TABLE).collectAsList();
// case 3: Update data in the table.
- checkUpdateSQLWithSelectModifyPrivileges();
+ checker.checkUpdateSQL();
// case 4: Delete data from the table.
- checkDeleteSQLWithSelectModifyPrivileges();
+ checker.checkDeleteSQL();
// case 5: Succeed to alter the table
testAlterTable();
// case 6: Fail to drop the table
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_TABLE));
+ testDropTable();
// case 7: If we don't have the role, we can't insert and select from data.
metalake.deleteRole(roleName);
waitForUpdatingPolicies();
- checkWithoutPrivileges();
+ checker.checkNoPrivSQL();
// Clean up
catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName,
tableName));
@@ -432,9 +487,11 @@ public abstract class RangerBaseE2EIT extends BaseIT {
}
@Test
- void testSelectOnlyTable() throws InterruptedException {
+ void testSelectOnlyTable() {
// Choose a catalog
- useCatalog();
+ reset();
+
+ TestSelectOnlyTableChecker checker = getTestSelectOnlyTableChecker();
// First, create a role for creating a database and grant role to the user
String readOnlyRole = currentFunName();
@@ -458,37 +515,90 @@ public abstract class RangerBaseE2EIT extends BaseIT {
sparkSession.sql(SQL_CREATE_TABLE);
// case 1: Fail to insert data into table
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_INSERT_TABLE));
+ checker.checkInsertSQL();
// case 2: Succeed to select data from the table
sparkSession.sql(SQL_SELECT_TABLE).collectAsList();
// case 3: Update data in the table
- checkUpdateSQLWithSelectPrivileges();
+ checker.checkUpdateSQL();
// case 4: Delete data from the table
- checkDeleteSQLWithSelectPrivileges();
+ checker.checkDeleteSQL();
- // case 5: Fail to alter the table
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_ALTER_TABLE));
+ // case 5: Test to alter the table
+ checker.checkAlterSQL();
- // case 6: Fail to drop the table
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_TABLE));
+ // case 6: Test to drop the table
+ checker.checkDropSQL();
// case 7: If we don't have the role, we can't insert and select from data.
metalake.deleteRole(readOnlyRole);
waitForUpdatingPolicies();
- checkWithoutPrivileges();
+ checker.checkNoPrivSQL();
// Clean up
catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName,
tableName));
catalog.asSchemas().dropSchema(schemaName, false);
}
+ TestSelectOnlyTableChecker getTestSelectOnlyTableChecker() {
+ return new TestSelectOnlyTableChecker() {
+ @Override
+ void checkUpdateSQL() {
+ checkUpdateSQLWithSelectPrivileges();
+ }
+
+ @Override
+ void checkDeleteSQL() {
+ checkDeleteSQLWithSelectPrivileges();
+ }
+
+ @Override
+ void checkNoPrivSQL() {
+ checkWithoutPrivileges();
+ }
+
+ @Override
+ void checkInsertSQL() {
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_INSERT_TABLE));
+ }
+
+ @Override
+ void checkAlterSQL() {
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_ALTER_TABLE));
+ }
+
+ @Override
+ void checkDropSQL() {
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_TABLE));
+ }
+ };
+ }
+
+ protected abstract static class TestSelectOnlyTableChecker {
+ abstract void checkUpdateSQL();
+
+ abstract void checkDeleteSQL();
+
+ abstract void checkNoPrivSQL();
+
+ abstract void checkInsertSQL();
+
+ abstract void checkDropSQL();
+
+ abstract void checkAlterSQL();
+ }
+
@Test
- void testModifyOnlyTable() throws InterruptedException {
+ void testModifyOnlyTable() {
// Choose a catalog
- useCatalog();
+ reset();
+
+ TestModifyOnlyTableChecker checker = getTestModifyOnlyTableChecker();
// First, create a role for creating a database and grant role to the user
String writeOnlyRole = currentFunName();
@@ -518,31 +628,58 @@ public abstract class RangerBaseE2EIT extends BaseIT {
sparkSession.sql(SQL_SELECT_TABLE).collectAsList();
// case 3: Update data in the table
- checkUpdateSQLWithModifyPrivileges();
+ checker.checkUpdateSQL();
// case 4: Delete data from the table
- checkDeleteSQLWithModifyPrivileges();
+ checker.checkDeleteSQL();
// case 5: Succeed to alter the table
testAlterTable();
// case 6: Fail to drop the table
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_TABLE));
+ testDropTable();
// case 7: If we don't have the role, we can't insert and select from data.
metalake.deleteRole(writeOnlyRole);
waitForUpdatingPolicies();
- checkWithoutPrivileges();
+ checker.checkNoPrivSQL();
// Clean up
catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName,
tableName));
catalog.asSchemas().dropSchema(schemaName, false);
}
+ TestModifyOnlyTableChecker getTestModifyOnlyTableChecker() {
+ return new TestModifyOnlyTableChecker() {
+ @Override
+ void checkUpdateSQL() {
+ checkUpdateSQLWithModifyPrivileges();
+ }
+
+ @Override
+ void checkDeleteSQL() {
+ checkDeleteSQLWithModifyPrivileges();
+ }
+
+ @Override
+ void checkNoPrivSQL() {
+ checkWithoutPrivileges();
+ }
+ };
+ }
+
+ protected abstract static class TestModifyOnlyTableChecker {
+ abstract void checkUpdateSQL();
+
+ abstract void checkDeleteSQL();
+
+ abstract void checkNoPrivSQL();
+ }
+
@Test
- void testCreateAllPrivilegesRole() throws InterruptedException {
+ void testCreateAllPrivilegesRole() {
// Choose a catalog
- useCatalog();
+ reset();
// Create a role
String roleName = currentFunName();
@@ -589,12 +726,13 @@ public abstract class RangerBaseE2EIT extends BaseIT {
}
@Test
- void testDeleteAndRecreateRole() throws InterruptedException {
+ void testDeleteAndRecreateRole() {
// Choose a catalog
- useCatalog();
+ reset();
+ TestDeleteAndRecreateRoleChecker checker =
getTestDeleteAndRecreateRoleChecker();
// Fail to create schema
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_SCHEMA));
+ checker.checkDropSchema();
// Create a role with CREATE_SCHEMA privilege
String roleName = currentFunName();
@@ -613,14 +751,14 @@ public abstract class RangerBaseE2EIT extends BaseIT {
// Succeed to create the schema
sparkSession.sql(SQL_CREATE_SCHEMA);
catalog.asSchemas().dropSchema(schemaName, false);
+ reset();
// Delete the role
metalake.deleteRole(roleName);
waitForUpdatingPolicies();
// Fail to create the schema
- Assertions.assertThrows(
- AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ checker.checkCreateSchema();
// Create the role again
metalake.createRole(roleName, Collections.emptyMap(),
Lists.newArrayList(securableObject));
@@ -637,10 +775,28 @@ public abstract class RangerBaseE2EIT extends BaseIT {
metalake.deleteRole(roleName);
}
+ protected TestDeleteAndRecreateRoleChecker
getTestDeleteAndRecreateRoleChecker() {
+ return new TestDeleteAndRecreateRoleChecker();
+ }
+
+ protected static class TestDeleteAndRecreateRoleChecker {
+ void checkDropSchema() {
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_SCHEMA));
+ }
+
+ void checkCreateSchema() {
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ }
+ }
+
@Test
- void testDeleteAndRecreateMetadataObject() throws InterruptedException {
+ void testDeleteAndRecreateMetadataObject() {
// Choose a catalog
- useCatalog();
+ reset();
+
+ TestDeleteAndRecreateMetadataObject checker =
getTestDeleteAndRecreateMetadataObject();
// Create a role with CREATE_SCHEMA privilege
String roleName = currentFunName();
@@ -659,7 +815,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
// Create a schema
sparkSession.sql(SQL_CREATE_SCHEMA);
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_SCHEMA));
+ checker.checkDropSchema();
// Set owner
MetadataObject schemaObject =
@@ -675,8 +831,6 @@ public abstract class RangerBaseE2EIT extends BaseIT {
// Recreate a schema
sparkSession.sql(SQL_CREATE_SCHEMA);
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_SCHEMA));
-
// Set owner
schemaObject = MetadataObjects.of(catalogName, schemaName,
MetadataObject.Type.SCHEMA);
metalake.setOwner(schemaObject, userName1, Owner.Type.USER);
@@ -687,16 +841,31 @@ public abstract class RangerBaseE2EIT extends BaseIT {
metalake.deleteRole(roleName);
waitForUpdatingPolicies();
- sparkSession.sql(SQL_CREATE_SCHEMA);
+ checker.checkRecreateSchema();
// Clean up
catalog.asSchemas().dropSchema(schemaName, false);
}
+ TestDeleteAndRecreateMetadataObject getTestDeleteAndRecreateMetadataObject()
{
+ return new TestDeleteAndRecreateMetadataObject();
+ }
+
+ protected static class TestDeleteAndRecreateMetadataObject {
+ void checkDropSchema() {
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_SCHEMA));
+ }
+
+ void checkRecreateSchema() {
+ sparkSession.sql(SQL_CREATE_SCHEMA);
+ }
+ }
+
@Test
- void testRenameMetadataObject() throws InterruptedException {
+ void testRenameMetadataObject() {
// Choose a catalog
- useCatalog();
+ reset();
// Create a role with CREATE_SCHEMA and CREATE_TABLE privilege
String roleName = currentFunName();
@@ -732,9 +901,9 @@ public abstract class RangerBaseE2EIT extends BaseIT {
}
@Test
- void testRenameMetadataObjectPrivilege() throws InterruptedException {
+ void testRenameMetadataObjectPrivilege() {
// Choose a catalog
- useCatalog();
+ reset();
// Create a role with CREATE_SCHEMA and CREATE_TABLE privilege
String roleName = currentFunName();
@@ -781,9 +950,11 @@ public abstract class RangerBaseE2EIT extends BaseIT {
}
@Test
- void testChangeOwner() throws InterruptedException {
+ void testChangeOwner() {
// Choose a catalog
- useCatalog();
+ reset();
+
+ TestChangeOwnerChecker checker = getTestChangeOwnerChecker();
// Create a schema and a table
String helperRole = currentFunName();
@@ -826,8 +997,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName,
tableName));
waitForUpdatingPolicies();
- // Fail to create the table
- Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_TABLE));
+ checker.checkCreateTable();
// case 3. user is the schema owner
MetadataObject schemaObject =
@@ -846,9 +1016,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
catalog.asSchemas().dropSchema(schemaName, false);
waitForUpdatingPolicies();
- // Fail to create schema
- Assertions.assertThrows(
- AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ checker.checkCreateSchema();
// case 4. user is the catalog owner
MetadataObject catalogObject =
@@ -894,10 +1062,30 @@ public abstract class RangerBaseE2EIT extends BaseIT {
catalog.asSchemas().dropSchema(schemaName, false);
}
+ TestChangeOwnerChecker getTestChangeOwnerChecker() {
+ return new TestChangeOwnerChecker();
+ }
+
+ protected static class TestChangeOwnerChecker {
+ void checkCreateTable() {
+ // Fail to create the table
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_TABLE));
+ }
+
+ void checkCreateSchema() {
+ // Fail to create schema
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ }
+ }
+
@Test
protected void testAllowUseSchemaPrivilege() throws InterruptedException {
// Choose a catalog
- useCatalog();
+ reset();
+
+ TestAllowUseSchemaPrivilegeChecker checker =
getTestAllowUseSchemaPrivilegeChecker();
// Create a role with CREATE_SCHEMA privilege
String roleName = currentFunName();
@@ -923,13 +1111,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
roleName, catalogObject,
Sets.newHashSet(Privileges.CreateSchema.allow()));
waitForUpdatingPolicies();
- // Use Spark to show this databases(schema)
- Dataset dataset1 = sparkSession.sql(SQL_SHOW_DATABASES);
- dataset1.show();
- List<Row> rows1 = dataset1.collectAsList();
- // The schema should not be shown, because the user does not have the
permission
- Assertions.assertEquals(
- 0, rows1.stream().filter(row ->
row.getString(0).equals(schemaName)).count());
+ checker.checkShowSchemas();
// Grant the privilege of using schema
MetadataObject schemaObject =
@@ -938,17 +1120,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
roleName, schemaObject, Sets.newHashSet(Privileges.UseSchema.allow()));
waitForUpdatingPolicies();
- // Use Spark to show this databases(schema) again
- Dataset dataset2 = sparkSession.sql(SQL_SHOW_DATABASES);
- dataset2.show(100, 100);
- List<Row> rows2 = dataset2.collectAsList();
- rows2.stream()
- .filter(row -> row.getString(0).equals(schemaName))
- .findFirst()
- .orElseThrow(() -> new IllegalStateException("Database not found: " +
schemaName));
- // The schema should be shown, because the user has the permission
- Assertions.assertEquals(
- 1, rows2.stream().filter(row ->
row.getString(0).equals(schemaName)).count());
+ checker.checkShowSchemasAgain();
// Clean up
catalog.asTableCatalog().purgeTable(NameIdentifier.of(schemaName,
tableName));
@@ -957,10 +1129,40 @@ public abstract class RangerBaseE2EIT extends BaseIT {
metalake.deleteRole(roleName);
}
- @Test
- void testDenyPrivileges() throws InterruptedException {
+ protected TestAllowUseSchemaPrivilegeChecker
getTestAllowUseSchemaPrivilegeChecker() {
+ return new TestAllowUseSchemaPrivilegeChecker();
+ }
+
+ protected static class TestAllowUseSchemaPrivilegeChecker {
+
+ void checkShowSchemas() {
+ // Use Spark to show this databases(schema)
+ Dataset dataset1 = sparkSession.sql(SQL_SHOW_DATABASES);
+ dataset1.show();
+ List<Row> rows1 = dataset1.collectAsList();
+ // The schema should not be shown, because the user does not have the
permission
+ Assertions.assertEquals(
+ 0, rows1.stream().filter(row ->
row.getString(0).equals(schemaName)).count());
+ }
+
+ void checkShowSchemasAgain() {
+ // Use Spark to show this databases(schema) again
+ Dataset dataset2 = sparkSession.sql(SQL_SHOW_DATABASES);
+ dataset2.show(100, 100);
+ List<Row> rows2 = dataset2.collectAsList();
+ rows2.stream()
+ .filter(row -> row.getString(0).equals(schemaName))
+ .findFirst()
+ .orElseThrow(() -> new IllegalStateException("Database not found: "
+ schemaName));
+ // The schema should be shown, because the user has the permission
+ Assertions.assertEquals(
+ 1, rows2.stream().filter(row ->
row.getString(0).equals(schemaName)).count());
+ }
+ }
+
+ void testDenyPrivileges() {
// Choose a catalog
- useCatalog();
+ reset();
// Create a schema
catalog.asSchemas().createSchema(schemaName, "test",
Collections.emptyMap());
@@ -1023,9 +1225,11 @@ public abstract class RangerBaseE2EIT extends BaseIT {
// ISSUE-5892 Fix to grant privilege for the metalake
@Test
- void testGrantPrivilegesForMetalake() throws InterruptedException {
+ void testGrantPrivilegesForMetalake() {
// Choose a catalog
- useCatalog();
+ reset();
+
+ TestGrantPrivilegesForMetalakeChecker checker =
getTestGrantPrivilegesForMetalakeChecker();
// Create a schema
String roleName = currentFunName();
@@ -1036,10 +1240,7 @@ public abstract class RangerBaseE2EIT extends BaseIT {
roleName,
MetadataObjects.of(null, metalakeName, MetadataObject.Type.METALAKE),
Sets.newHashSet(Privileges.CreateSchema.allow()));
-
- // Fail to create a schema
- Assertions.assertThrows(
- AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ checker.checkCreateSchema();
// Granted this role to the spark execution user `HADOOP_USER_NAME`
String userName1 = testUserName();
@@ -1053,4 +1254,16 @@ public abstract class RangerBaseE2EIT extends BaseIT {
catalog.asSchemas().dropSchema(schemaName, false);
metalake.deleteRole(roleName);
}
+
+ TestGrantPrivilegesForMetalakeChecker
getTestGrantPrivilegesForMetalakeChecker() {
+ return new TestGrantPrivilegesForMetalakeChecker();
+ }
+
+ protected static class TestGrantPrivilegesForMetalakeChecker {
+ void checkCreateSchema() {
+ // Fail to create a schema
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ }
+ }
}
diff --git
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerFilesetIT.java
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerFilesetIT.java
index 3d199da70f..f1e2b14470 100644
---
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerFilesetIT.java
+++
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerFilesetIT.java
@@ -51,6 +51,7 @@ import org.apache.gravitino.authorization.ranger.RangerHelper;
import org.apache.gravitino.authorization.ranger.RangerPrivileges;
import org.apache.gravitino.client.GravitinoMetalake;
import org.apache.gravitino.file.Fileset;
+import org.apache.gravitino.file.FilesetChange;
import org.apache.gravitino.integration.test.container.HiveContainer;
import org.apache.gravitino.integration.test.container.RangerContainer;
import org.apache.gravitino.integration.test.util.BaseIT;
@@ -485,9 +486,41 @@ public class RangerFilesetIT extends BaseIT {
return null;
});
+ catalog
+ .asFilesetCatalog()
+ .alterFileset(
+ NameIdentifier.of(schemaName, filenameRole),
FilesetChange.rename("new_name"));
+ UserGroupInformation.createProxyUser(userName,
UserGroupInformation.getCurrentUser())
+ .doAs(
+ (PrivilegedExceptionAction<Void>)
+ () -> {
+ FileSystem userFileSystem =
+ FileSystem.get(
+ new Configuration() {
+ {
+ set("fs.defaultFS", defaultBaseLocation());
+ }
+ });
+ Assertions.assertDoesNotThrow(
+ () ->
+ userFileSystem.listFiles(new
Path(storageLocation(filenameRole)), false));
+ Assertions.assertDoesNotThrow(
+ () ->
+ userFileSystem.mkdirs(
+ new Path(
+ String.format("%s/%s",
storageLocation(filenameRole), "test3"))));
+ userFileSystem.close();
+ return null;
+ });
+ MetadataObject renamedFilesetObject =
+ MetadataObjects.of(
+ String.format("%s.%s", catalogName, schemaName),
+ "new_name",
+ MetadataObject.Type.FILESET);
+
metalake.revokePrivilegesFromRole(
filesetRole,
- filesetObject,
+ renamedFilesetObject,
Sets.newHashSet(Privileges.ReadFileset.allow(),
Privileges.WriteFileset.allow()));
RangerBaseE2EIT.waitForUpdatingPolicies();
UserGroupInformation.createProxyUser(userName,
UserGroupInformation.getCurrentUser())
@@ -515,7 +548,7 @@ public class RangerFilesetIT extends BaseIT {
return null;
});
- catalog.asFilesetCatalog().dropFileset(NameIdentifier.of(schemaName,
fileset.name()));
+ catalog.asFilesetCatalog().dropFileset(NameIdentifier.of(schemaName,
"new_name"));
}
private void createCatalogAndSchema() {
diff --git
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
index 53c81bd0b9..79e8252a24 100644
---
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
+++
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
@@ -112,7 +112,7 @@ public class RangerHiveE2EIT extends RangerBaseE2EIT {
}
@Override
- protected void useCatalog() {
+ protected void reset() {
// Do nothing, default catalog is ok for Hive.
}
diff --git
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveHdfsE2EIT.java
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveHdfsE2EIT.java
index 4289d50291..ea107947c2 100644
---
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveHdfsE2EIT.java
+++
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveHdfsE2EIT.java
@@ -25,20 +25,22 @@ import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
import java.security.PrivilegedExceptionAction;
-import java.util.Arrays;
import java.util.Collections;
+import java.util.List;
import java.util.Map;
import org.apache.gravitino.Catalog;
import org.apache.gravitino.Configs;
+import org.apache.gravitino.MetadataObject;
+import org.apache.gravitino.MetadataObjects;
import org.apache.gravitino.NameIdentifier;
import org.apache.gravitino.auth.AuthConstants;
import org.apache.gravitino.auth.AuthenticatorType;
+import org.apache.gravitino.authorization.Owner;
import org.apache.gravitino.authorization.Privileges;
import org.apache.gravitino.authorization.SecurableObject;
import org.apache.gravitino.authorization.SecurableObjects;
import org.apache.gravitino.authorization.common.RangerAuthorizationProperties;
import org.apache.gravitino.catalog.hive.HiveConstants;
-import org.apache.gravitino.client.GravitinoMetalake;
import org.apache.gravitino.exceptions.UserAlreadyExistsException;
import org.apache.gravitino.integration.test.container.HiveContainer;
import org.apache.gravitino.integration.test.container.RangerContainer;
@@ -52,6 +54,12 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.spark.SparkUnsupportedOperationException;
+import org.apache.spark.sql.AnalysisException;
+import org.apache.spark.sql.Dataset;
+import org.apache.spark.sql.Row;
+import org.apache.spark.sql.SparkSession;
+import org.apache.spark.sql.catalyst.analysis.NoSuchNamespaceException;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeAll;
@@ -61,7 +69,7 @@ import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
@Tag("gravitino-docker-test")
-public class RangerHiveHdfsE2EIT extends BaseIT {
+public class RangerHiveHdfsE2EIT extends RangerBaseE2EIT {
private static final Logger LOG =
LoggerFactory.getLogger(RangerHiveHdfsE2EIT.class);
private static final String provider = "hive";
@@ -69,18 +77,13 @@ public class RangerHiveHdfsE2EIT extends BaseIT {
public static final String HIVE_COL_NAME1 = "hive_col_name1";
public static final String HIVE_COL_NAME2 = "hive_col_name2";
public static final String HIVE_COL_NAME3 = "hive_col_name3";
- private static String metalakeName;
- private static GravitinoMetalake metalake;
- private static String catalogName;
- private static String HIVE_METASTORE_URIS;
- private static Catalog catalog;
+ public static final String testUserName = "test";
private static String DEFAULT_FS;
@BeforeAll
public void startIntegrationTest() throws Exception {
metalakeName = GravitinoITUtils.genRandomName("metalake").toLowerCase();
- catalogName = GravitinoITUtils.genRandomName("catalog").toLowerCase();
// Enable Gravitino Authorization mode
Map<String, String> configs = Maps.newHashMap();
@@ -113,37 +116,18 @@ public class RangerHiveHdfsE2EIT extends BaseIT {
conf.set("fs.defaultFS", DEFAULT_FS);
RangerITEnv.cleanup();
+
try {
metalake.addUser(System.getenv(HADOOP_USER_NAME));
+ metalake.addUser(testUserName);
} catch (UserAlreadyExistsException e) {
LOG.error("Failed to add user: {}", System.getenv(HADOOP_USER_NAME), e);
}
}
@AfterAll
- void cleanIT() {
- if (client != null) {
- Arrays.stream(catalog.asSchemas().listSchemas())
- .filter(schema -> !schema.equals("default"))
- .forEach(
- (schema -> {
- catalog.asSchemas().dropSchema(schema, false);
- }));
-
- // The `dropCatalog` call will invoke the catalog metadata object to
remove privileges
- Arrays.stream(metalake.listCatalogs())
- .forEach((catalogName -> metalake.dropCatalog(catalogName, true)));
- client.disableMetalake(metalakeName);
- client.dropMetalake(metalakeName);
- }
-
- try {
- closer.close();
- } catch (Exception e) {
- LOG.error("Failed to close CloseableGroup", e);
- }
- client = null;
- RangerITEnv.cleanup();
+ public void stop() {
+ cleanIT();
}
protected void createCatalog() {
@@ -173,28 +157,24 @@ public class RangerHiveHdfsE2EIT extends BaseIT {
LOG.info("Catalog created: {}", catalog);
}
- private void createMetalake() {
- GravitinoMetalake[] gravitinoMetalakes = client.listMetalakes();
- Assertions.assertEquals(0, gravitinoMetalakes.length);
-
- client.createMetalake(metalakeName, "comment", Collections.emptyMap());
- GravitinoMetalake loadMetalake = client.loadMetalake(metalakeName);
- Assertions.assertEquals(metalakeName, loadMetalake.name());
-
- metalake = loadMetalake;
- }
-
@Test
public void testRenameTable() throws Exception {
+ cleanSchemaPath();
+ MetadataObject metadataObject =
+ MetadataObjects.of(Lists.newArrayList(metalakeName),
MetadataObject.Type.METALAKE);
+ MetadataObject catalogObject =
+ MetadataObjects.of(Lists.newArrayList(catalogName),
MetadataObject.Type.CATALOG);
+ metalake.setOwner(metadataObject, AuthConstants.ANONYMOUS_USER,
Owner.Type.USER);
+ metalake.setOwner(catalogObject, AuthConstants.ANONYMOUS_USER,
Owner.Type.USER);
+ waitForUpdatingPolicies();
+
// 1. Create a table
TableCatalog tableCatalog = catalog.asTableCatalog();
tableCatalog.createTable(
NameIdentifier.of("default", "test"), createColumns(), "comment1",
ImmutableMap.of());
// 2. check the privileges, should throw an exception
- String userName = "test";
- metalake.addUser(userName);
- UserGroupInformation.createProxyUser(userName,
UserGroupInformation.getCurrentUser())
+ UserGroupInformation.createProxyUser(testUserName,
UserGroupInformation.getCurrentUser())
.doAs(
(PrivilegedExceptionAction<Void>)
() -> {
@@ -214,20 +194,20 @@ public class RangerHiveHdfsE2EIT extends BaseIT {
});
// 3. Grant the privileges to the table
- SecurableObject catalogObject =
+ SecurableObject catalogSecObject =
SecurableObjects.ofCatalog(catalogName, Collections.emptyList());
SecurableObject schemaObject =
- SecurableObjects.ofSchema(catalogObject, "default",
Collections.emptyList());
+ SecurableObjects.ofSchema(catalogSecObject, "default",
Collections.emptyList());
SecurableObject tableObject =
SecurableObjects.ofTable(
schemaObject, "test",
Lists.newArrayList(Privileges.ModifyTable.allow()));
metalake.createRole(
"hdfs_rename_role", Collections.emptyMap(),
Lists.newArrayList(tableObject));
- metalake.grantRolesToUser(Lists.newArrayList("hdfs_rename_role"),
userName);
- RangerBaseE2EIT.waitForUpdatingPolicies();
+ metalake.grantRolesToUser(Lists.newArrayList("hdfs_rename_role"),
testUserName);
+ waitForUpdatingPolicies();
- UserGroupInformation.createProxyUser(userName,
UserGroupInformation.getCurrentUser())
+ UserGroupInformation.createProxyUser(testUserName,
UserGroupInformation.getCurrentUser())
.doAs(
(PrivilegedExceptionAction<Void>)
() -> {
@@ -245,7 +225,7 @@ public class RangerHiveHdfsE2EIT extends BaseIT {
tableCatalog.alterTable(NameIdentifier.of("default", "test"),
TableChange.rename("test1"));
// 5. Check the privileges
- UserGroupInformation.createProxyUser(userName,
UserGroupInformation.getCurrentUser())
+ UserGroupInformation.createProxyUser(testUserName,
UserGroupInformation.getCurrentUser())
.doAs(
(PrivilegedExceptionAction<Void>)
() -> {
@@ -259,6 +239,9 @@ public class RangerHiveHdfsE2EIT extends BaseIT {
userFileSystem.close();
return null;
});
+
+ // 6. Delete the table
+ tableCatalog.dropTable(NameIdentifier.of("default", "test1"));
}
private Column[] createColumns() {
@@ -267,4 +250,275 @@ public class RangerHiveHdfsE2EIT extends BaseIT {
Column col3 = Column.of(HIVE_COL_NAME3, Types.StringType.get(),
"col_3_comment");
return new Column[] {col1, col2, col3};
}
+
+ @Override
+ protected String testUserName() {
+ return testUserName;
+ }
+
+ @Override
+ protected void checkTableAllPrivilegesExceptForCreating() {
+ // - a. Succeed to insert data into the table
+ sparkSession.sql(SQL_INSERT_TABLE);
+
+ // - b. Succeed to select data from the table
+ sparkSession.sql(SQL_SELECT_TABLE).collectAsList();
+
+ // - c: Fail to update data in the table, because Hive doesn't support UPDATE
+ Assertions.assertThrows(
+ SparkUnsupportedOperationException.class, () ->
sparkSession.sql(SQL_UPDATE_TABLE));
+
+ // - d: Fail to delete data from the table, because Hive doesn't support DELETE
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_DELETE_TABLE));
+
+ // - e: Succeed to alter the table
+ sparkSession.sql(SQL_ALTER_TABLE);
+
+ // - f: Succeed to drop the table
+ sparkSession.sql(SQL_DROP_TABLE);
+ }
+
+ @Override
+ protected void checkUpdateSQLWithSelectModifyPrivileges() {
+ Assertions.assertThrows(
+ SparkUnsupportedOperationException.class, () ->
sparkSession.sql(SQL_UPDATE_TABLE));
+ }
+
+ @Override
+ protected void checkUpdateSQLWithSelectPrivileges() {
+ Assertions.assertThrows(
+ SparkUnsupportedOperationException.class, () ->
sparkSession.sql(SQL_UPDATE_TABLE));
+ }
+
+ @Override
+ protected void checkUpdateSQLWithModifyPrivileges() {
+ Assertions.assertThrows(
+ SparkUnsupportedOperationException.class, () ->
sparkSession.sql(SQL_UPDATE_TABLE));
+ }
+
+ @Override
+ protected void checkDeleteSQLWithSelectModifyPrivileges() {
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_DELETE_TABLE));
+ }
+
+ @Override
+ protected void checkDeleteSQLWithSelectPrivileges() {
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_DELETE_TABLE));
+ }
+
+ @Override
+ protected void checkDeleteSQLWithModifyPrivileges() {
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_DELETE_TABLE));
+ }
+
+ @Override
+ protected void reset() {
+ cleanSchemaPath();
+ initSparkSession();
+ }
+
+ @Override
+ protected void checkWithoutPrivileges() {
+ // Ignore this, because if we create a schema, maybe we have nearly
+ // all of the privileges of this directory.
+ }
+
+ @Override
+ protected void testAlterTable() {
+ // We have added extra test case to test alter, ignore this.
+ }
+
+ @Override
+ protected void testDropTable() {
+ Assertions.assertDoesNotThrow(() -> sparkSession.sql(SQL_DROP_TABLE));
+ }
+
+ private void cleanSchemaPath() {
+ BaseIT.runInEnv(
+ "HADOOP_USER_NAME",
+ "gravitino",
+ () -> {
+ Configuration conf = new Configuration();
+ conf.set("fs.defaultFS", DEFAULT_FS);
+ conf.set("fs.hdfs.impl.disable.cache", "false");
+ try (FileSystem userFileSystem = FileSystem.get(conf)) {
+ String schemaPath = String.format("%s/%s.db", DEFAULT_FS,
schemaName);
+ userFileSystem.delete(new Path(schemaPath), true);
+ } catch (Exception e) {
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ private void initSparkSession() {
+ BaseIT.runInEnv(
+ "HADOOP_USER_NAME",
+ testUserName,
+ () -> {
+ sparkSession =
+ SparkSession.builder()
+ .master("local[1]")
+ .appName("Ranger Hive E2E integration test")
+ .config("hive.metastore.uris", HIVE_METASTORE_URIS)
+ .config(
+ "spark.sql.warehouse.dir",
+ String.format(
+ "hdfs://%s:%d/user/hive/warehouse",
+
containerSuite.getHiveRangerContainer().getContainerIpAddress(),
+ HiveContainer.HDFS_DEFAULTFS_PORT))
+ .config("spark.sql.storeAssignmentPolicy", "LEGACY")
+
.config("mapreduce.input.fileinputformat.input.dir.recursive", "true")
+ .enableHiveSupport()
+ .getOrCreate();
+ sparkSession.sql(SQL_SHOW_DATABASES); // must be called to activate
the Spark session
+ });
+ }
+
+ protected TestCreateSchemaChecker getTestCreateSchemaChecker() {
+ return new TestCreateSchemaChecker() {
+ void checkCreateTable() {
+ // If the schema path is created by the user, the user can have the
privilege
+ // to create the table. So we ignore the check
+ }
+ };
+ }
+
+ protected TestCreateTableChecker getTestCreateTableChecker() {
+ return new TestCreateTableChecker() {
+ @Override
+ void checkTableReadWrite() {
+ // If the table path is created by the user, the user can have the
privilege
+ // to read and write the table. So we ignore the check
+ }
+
+ void checkCreateTable() {
+ // If the schema path is created by the user, the user can have the
privilege
+ // to create the table. So we ignore the check
+ }
+ };
+ }
+
+ protected TestDeleteAndRecreateRoleChecker
getTestDeleteAndRecreateRoleChecker() {
+ return new TestDeleteAndRecreateRoleChecker() {
+ @Override
+ void checkDropSchema() {
+ Assertions.assertThrows(
+ NoSuchNamespaceException.class, () ->
sparkSession.sql(SQL_DROP_SCHEMA));
+ }
+
+ @Override
+ void checkCreateSchema() {
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ }
+ };
+ }
+
+ @Override
+ TestSelectOnlyTableChecker getTestSelectOnlyTableChecker() {
+ return new TestSelectOnlyTableChecker() {
+ @Override
+ void checkUpdateSQL() {
+ // If the schema path is created by the user, the user can have the
privilege
+ // to update the table. So we ignore the check
+ }
+
+ @Override
+ void checkDeleteSQL() {
+ // If the schema path is created by the user, the user can have the
privilege
+ // to delete the table. So we ignore the check
+ }
+
+ @Override
+ void checkNoPrivSQL() {
+ // If the schema path is created by the user, the user can have the
privilege
+ // to do any operation about the table. So we ignore the check
+ }
+
+ @Override
+ void checkInsertSQL() {
+ // If the schema path is created by the user, the user can have the
privilege
+ // to insert the table. So we ignore the check
+ }
+
+ @Override
+ void checkDropSQL() {
+ // If the schema path is created by the user, the user can have the
privilege
+ // to drop the table. So we ignore the check
+ }
+
+ @Override
+ void checkAlterSQL() {
+ // If the schema path is created by the user, the user can have the
privilege
+ // to alter the table. So we ignore the check
+ }
+ };
+ }
+
+ @Override
+ TestDeleteAndRecreateMetadataObject getTestDeleteAndRecreateMetadataObject()
{
+ return new TestDeleteAndRecreateMetadataObject() {
+ @Override
+ void checkDropSchema() {
+ // If the schema path is created by the user, the user can have the
privilege
+ // to drop the schema. So we ignore the check
+ }
+
+ @Override
+ void checkRecreateSchema() {
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ }
+ };
+ }
+
+ @Override
+ protected TestAllowUseSchemaPrivilegeChecker
getTestAllowUseSchemaPrivilegeChecker() {
+ return new TestAllowUseSchemaPrivilegeChecker() {
+ @Override
+ void checkShowSchemas() {
+ // Use Spark to show the databases (schemas)
+ Dataset dataset1 = sparkSession.sql(SQL_SHOW_DATABASES);
+ dataset1.show();
+ List<Row> rows1 = dataset1.collectAsList();
+ Assertions.assertEquals(
+ 1, rows1.stream().filter(row ->
row.getString(0).equals(schemaName)).count());
+ }
+
+ @Override
+ void checkShowSchemasAgain() {
+ // Use Spark to show the databases (schemas) again
+ Dataset dataset1 = sparkSession.sql(SQL_SHOW_DATABASES);
+ dataset1.show();
+ List<Row> rows1 = dataset1.collectAsList();
+ Assertions.assertEquals(
+ 1, rows1.stream().filter(row ->
row.getString(0).equals(schemaName)).count());
+ }
+ };
+ }
+
+ @Override
+ TestGrantPrivilegesForMetalakeChecker
getTestGrantPrivilegesForMetalakeChecker() {
+ return new TestGrantPrivilegesForMetalakeChecker() {
+ @Override
+ void checkCreateSchema() {
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ }
+ };
+ }
+
+ @Override
+ TestChangeOwnerChecker getTestChangeOwnerChecker() {
+ return new TestChangeOwnerChecker() {
+ @Override
+ void checkCreateTable() {
+ // If the schema path is created by the user, the user can have the
privilege
+ // to create the table. So we ignore the check
+ }
+
+ @Override
+ void checkCreateSchema() {
+ reset();
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+ }
+ };
+ }
}
diff --git
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java
index 7990011cd9..78941a225d 100644
---
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java
+++
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerIcebergE2EIT.java
@@ -185,7 +185,7 @@ public class RangerIcebergE2EIT extends RangerBaseE2EIT {
LOG.info("Catalog created: {}", catalog);
}
- public void useCatalog() {
+ public void reset() {
String userName1 = System.getenv(HADOOP_USER_NAME);
String roleName = currentFunName();
SecurableObject securableObject =
diff --git
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java
index e045a25fb2..015c444b79 100644
---
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java
+++
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerPaimonE2EIT.java
@@ -115,7 +115,7 @@ public class RangerPaimonE2EIT extends RangerBaseE2EIT {
}
@Override
- protected void useCatalog() {
+ protected void reset() {
String userName1 = System.getenv(HADOOP_USER_NAME);
String roleName = currentFunName();
SecurableObject securableObject =