This is an automated email from the ASF dual-hosted git repository.
liuxun pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/gravitino.git
The following commit(s) were added to refs/heads/main by this push:
new b5ff4baba [#5180][part-1] improvement(test): Add more integration
tests about access control (#5190)
b5ff4baba is described below
commit b5ff4baba725b6a47fb999383ed6d55547f5b367
Author: roryqi <[email protected]>
AuthorDate: Mon Oct 28 18:54:44 2024 +0800
[#5180][part-1] improvement(test): Add more integration tests about access
control (#5190)
### What changes were proposed in this pull request?
1) Add more integration tests about access control.
2) Fix the issue that a role can't be created in the authorization plugin
with `CREATE_ROLE`, `MANAGE_USERS`.
3) Fix the issue that creating a role containing a metalake securable object doesn't take effect.
### Why are the changes needed?
Fix: #5180
### Does this PR introduce _any_ user-facing change?
No.
### How was this patch tested?
Just tests.
---
.../authorization-ranger/build.gradle.kts | 3 +
.../ranger/RangerAuthorizationHivePlugin.java | 4 +
.../ranger/RangerAuthorizationPlugin.java | 9 -
.../test/RangerAuthorizationPluginIT.java | 45 +--
.../ranger/integration/test/RangerHiveE2EIT.java | 446 ++++++++++++++++++---
build.gradle.kts | 6 +-
.../authorization/AuthorizationUtils.java | 8 +-
7 files changed, 419 insertions(+), 102 deletions(-)
diff --git a/authorizations/authorization-ranger/build.gradle.kts
b/authorizations/authorization-ranger/build.gradle.kts
index 13f4cc753..66341d9b0 100644
--- a/authorizations/authorization-ranger/build.gradle.kts
+++ b/authorizations/authorization-ranger/build.gradle.kts
@@ -125,6 +125,9 @@ tasks {
}
tasks.test {
+ doFirst {
+ environment("HADOOP_USER_NAME", "test")
+ }
dependsOn(":catalogs:catalog-hive:jar", ":catalogs:catalog-hive:runtimeJars")
val skipITs = project.hasProperty("skipITs")
diff --git
a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHivePlugin.java
b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHivePlugin.java
index 83031b764..c88f57f8e 100644
---
a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHivePlugin.java
+++
b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHivePlugin.java
@@ -226,6 +226,10 @@ public class RangerAuthorizationHivePlugin extends
RangerAuthorizationPlugin {
.forEach(
gravitinoPrivilege -> {
Set<RangerPrivilege> rangerPrivileges = new HashSet<>();
+ // Ignore unsupported privileges
+ if
(!privilegesMappingRule().containsKey(gravitinoPrivilege.name())) {
+ return;
+ }
privilegesMappingRule().get(gravitinoPrivilege.name()).stream()
.forEach(
rangerPrivilege ->
diff --git
a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
index e0da6b904..03376cb07 100644
---
a/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
+++
b/authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationPlugin.java
@@ -681,15 +681,6 @@ public abstract class RangerAuthorizationPlugin
securableObject.privileges().stream()
.forEach(
privilege -> {
- if (!allowPrivilegesRule().contains(privilege.name()))
{
- LOG.error(
- "Authorization to ignore privilege({}) on
metadata object({})!",
- privilege.name(),
- securableObject.fullName());
- match.set(false);
- return;
- }
-
if (!privilege.canBindTo(securableObject.type())) {
LOG.error(
"The privilege({}) is not supported for the
metadata object({})!",
diff --git
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java
index 32eb58d18..21ab5dc55 100644
---
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java
+++
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerAuthorizationPluginIT.java
@@ -299,11 +299,11 @@ public class RangerAuthorizationPluginIT {
String.format("catalog.schema"),
MetadataObject.Type.SCHEMA,
Lists.newArrayList(Privileges.ReadFileset.allow()));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createFilesetInMetalake)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createFilesetInCatalog)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createFilesetInSchema)));
// Ignore the Topic operation
@@ -327,13 +327,13 @@ public class RangerAuthorizationPluginIT {
String.format("catalog.schema.fileset"),
MetadataObject.Type.FILESET,
Lists.newArrayList(Privileges.WriteFileset.allow()));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(writeFilesetInMetalake)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(writeFilesetInCatalog)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(writeFilesetInScheam)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(writeFileset)));
// Ignore the Fileset operation
@@ -357,14 +357,13 @@ public class RangerAuthorizationPluginIT {
String.format("catalog.schema.table"),
MetadataObject.Type.FILESET,
Lists.newArrayList(Privileges.ReadFileset.allow()));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(readFilesetInMetalake)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(readFilesetInCatalog)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(readFilesetInSchema)));
- Assertions.assertFalse(
-
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(readFileset)));
+
Assertions.assertTrue(rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(readFileset)));
// Ignore the Topic operation
SecurableObject createTopicInMetalake =
@@ -382,11 +381,11 @@ public class RangerAuthorizationPluginIT {
String.format("catalog.schema"),
MetadataObject.Type.SCHEMA,
Lists.newArrayList(Privileges.CreateTopic.allow()));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createTopicInMetalake)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createTopicInCatalog)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(createTopicInSchema)));
SecurableObject produceTopicInMetalake =
@@ -409,13 +408,13 @@ public class RangerAuthorizationPluginIT {
String.format("catalog.schema.fileset"),
MetadataObject.Type.TOPIC,
Lists.newArrayList(Privileges.ProduceTopic.allow()));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(produceTopicInMetalake)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(produceTopicInCatalog)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(produceTopicInSchema)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(produceTopic)));
SecurableObject consumeTopicInMetalake =
@@ -438,13 +437,13 @@ public class RangerAuthorizationPluginIT {
String.format("catalog.schema.topic"),
MetadataObject.Type.TOPIC,
Lists.newArrayList(Privileges.ConsumeTopic.allow()));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(consumeTopicInMetalake)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(consumeTopicInCatalog)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(consumeTopicInSchema)));
- Assertions.assertFalse(
+ Assertions.assertTrue(
rangerAuthPlugin.validAuthorizationOperation(Arrays.asList(consumeTopic)));
}
diff --git
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
index 91597ce8e..91d58bd18 100644
---
a/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
+++
b/authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java
@@ -33,7 +33,6 @@ import com.google.common.collect.Maps;
import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
-import java.time.Instant;
import java.util.Arrays;
import java.util.Collections;
import java.util.List;
@@ -42,7 +41,9 @@ import org.apache.commons.io.FileUtils;
import org.apache.gravitino.Catalog;
import org.apache.gravitino.Configs;
import org.apache.gravitino.MetadataObject;
-import org.apache.gravitino.Schema;
+import org.apache.gravitino.MetadataObjects;
+import org.apache.gravitino.NameIdentifier;
+import org.apache.gravitino.auth.AuthConstants;
import org.apache.gravitino.auth.AuthenticatorType;
import org.apache.gravitino.authorization.Privileges;
import org.apache.gravitino.authorization.SecurableObject;
@@ -54,9 +55,9 @@ import
org.apache.gravitino.integration.test.container.HiveContainer;
import org.apache.gravitino.integration.test.container.RangerContainer;
import org.apache.gravitino.integration.test.util.BaseIT;
import org.apache.gravitino.integration.test.util.GravitinoITUtils;
-import org.apache.gravitino.meta.AuditInfo;
-import org.apache.gravitino.meta.RoleEntity;
-import org.apache.gravitino.meta.UserEntity;
+import org.apache.kyuubi.plugin.spark.authz.AccessControlException;
+import org.apache.spark.SparkUnsupportedOperationException;
+import org.apache.spark.sql.AnalysisException;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
@@ -73,11 +74,11 @@ public class RangerHiveE2EIT extends BaseIT {
private static final Logger LOG =
LoggerFactory.getLogger(RangerHiveE2EIT.class);
public static final String metalakeName =
- GravitinoITUtils.genRandomName("RangerHiveE2EIT_metalake").toLowerCase();
- public static final String catalogName =
- GravitinoITUtils.genRandomName("RangerHiveE2EIT_catalog").toLowerCase();
- public static final String schemaName =
- GravitinoITUtils.genRandomName("RangerHiveE2EIT_schema").toLowerCase();
+ GravitinoITUtils.genRandomName("metalake").toLowerCase();
+ public static final String catalogName =
GravitinoITUtils.genRandomName("catalog").toLowerCase();
+ public static final String schemaName =
GravitinoITUtils.genRandomName("schema").toLowerCase();
+
+ public static final String tableName =
GravitinoITUtils.genRandomName("table").toLowerCase();
private static GravitinoMetalake metalake;
private static Catalog catalog;
@@ -85,14 +86,34 @@ public class RangerHiveE2EIT extends BaseIT {
private static String HIVE_METASTORE_URIS;
private static SparkSession sparkSession = null;
- private final AuditInfo auditInfo =
-
AuditInfo.builder().withCreator("test").withCreateTime(Instant.now()).build();
private static final String HADOOP_USER_NAME = "HADOOP_USER_NAME";
- private static final String TEST_USER_NAME = "e2e_it_user";
private static final String SQL_SHOW_DATABASES =
String.format("SHOW DATABASES like '%s'", schemaName);
+ private static final String SQL_CREATE_SCHEMA = String.format("CREATE
DATABASE %s", schemaName);
+
+ private static final String SQL_USE_SCHEMA = String.format("USE SCHEMA %s",
schemaName);
+
+ private static final String SQL_CREATE_TABLE =
+ String.format("CREATE TABLE %s (a int, b string, c string)", tableName);
+
+ private static final String SQL_INSERT_TABLE =
+ String.format("INSERT INTO %s (a, b, c) VALUES (1, 'a', 'b')",
tableName);
+
+ private static final String SQL_SELECT_TABLE = String.format("SELECT * FROM
%s", tableName);
+
+ private static final String SQL_UPDATE_TABLE =
+ String.format("UPDATE %s SET b = 'b', c = 'c' WHERE a = 1", tableName);
+
+ private static final String SQL_DELETE_TABLE =
+ String.format("DELETE FROM %s WHERE a = 1", tableName);
+
+ private static final String SQL_ALTER_TABLE =
+ String.format("ALTER TABLE %s ADD COLUMN d string", tableName);
+
+ private static final String SQL_DROP_TABLE = String.format("DROP TABLE %s",
tableName);
+
private static String RANGER_ADMIN_URL = null;
@BeforeAll
@@ -102,7 +123,7 @@ public class RangerHiveE2EIT extends BaseIT {
configs.put(Configs.ENABLE_AUTHORIZATION.getKey(), String.valueOf(true));
configs.put(Configs.SERVICE_ADMINS.getKey(), RangerITEnv.HADOOP_USER_NAME);
configs.put(Configs.AUTHENTICATORS.getKey(),
AuthenticatorType.SIMPLE.name().toLowerCase());
- configs.put("SimpleAuthUserName", TEST_USER_NAME);
+ configs.put("SimpleAuthUserName", AuthConstants.ANONYMOUS_USER);
registerCustomConfigs(configs);
super.startIntegrationTest();
@@ -143,6 +164,8 @@ public class RangerHiveE2EIT extends BaseIT {
createMetalake();
createCatalog();
+
+ metalake.addUser("test");
}
private static void generateRangerSparkSecurityXML() throws IOException {
@@ -204,52 +227,347 @@ public class RangerHiveE2EIT extends BaseIT {
}
@Test
- void testAllowUseSchemaPrivilege() throws InterruptedException {
- // First, create a schema use Gravitino client
- createSchema();
+ void testCreateSchema() throws InterruptedException {
+ // First, fail to create the schema
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
- // Use Spark to show this databases(schema)
- Dataset dataset1 = sparkSession.sql(SQL_SHOW_DATABASES);
- dataset1.show();
- List<Row> rows1 = dataset1.collectAsList();
- // The schema should not be shown, because the user does not have the
permission
- Assertions.assertEquals(
- 0, rows1.stream().filter(row ->
row.getString(0).equals(schemaName)).count());
+ // Second, grant the `CREATE_SCHEMA` role
+ String userName1 = System.getenv(HADOOP_USER_NAME);
+ String roleName = currentFunName();
+ SecurableObject securableObject =
+ SecurableObjects.ofMetalake(
+ metalakeName, Lists.newArrayList(Privileges.CreateSchema.allow()));
+ metalake.createRole(roleName, Collections.emptyMap(),
Lists.newArrayList(securableObject));
+ metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+ waitForUpdatingPolicies();
+
+ // Third, succeed to create the schema
+ sparkSession.sql(SQL_CREATE_SCHEMA);
+
+ // Clean up
+ catalog.asSchemas().dropSchema(schemaName, true);
+ metalake.deleteRole(roleName);
+ }
+
+ @Test
+ void testCreateTable() throws InterruptedException {
+ // First, create a role for creating a database and grant role to the user
+ String createSchemaRole = currentFunName();
+ SecurableObject securableObject =
+ SecurableObjects.ofMetalake(
+ metalakeName,
+ Lists.newArrayList(Privileges.UseSchema.allow(),
Privileges.CreateSchema.allow()));
+ String userName1 = System.getenv(HADOOP_USER_NAME);
+ metalake.createRole(
+ createSchemaRole, Collections.emptyMap(),
Lists.newArrayList(securableObject));
+ metalake.grantRolesToUser(Lists.newArrayList(createSchemaRole), userName1);
+ waitForUpdatingPolicies();
+ // Second, create a schema
+ sparkSession.sql(SQL_CREATE_SCHEMA);
+
+ // Third, fail to create a table
+ sparkSession.sql(SQL_USE_SCHEMA);
+ Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_TABLE));
+
+ // Fourth, create a role for creating a table and grant to the user
+ String createTableRole = currentFunName() + "2";
+ securableObject =
+ SecurableObjects.ofMetalake(
+ metalakeName, Lists.newArrayList(Privileges.CreateTable.allow()));
+ metalake.createRole(
+ createTableRole, Collections.emptyMap(),
Lists.newArrayList(securableObject));
+ metalake.grantRolesToUser(Lists.newArrayList(createTableRole), userName1);
+ waitForUpdatingPolicies();
+
+ // Fifth, succeed to create a table
+ sparkSession.sql(SQL_CREATE_TABLE);
+
+ // Sixth, fail to read and write a table
+ Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_INSERT_TABLE));
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
+
+ // Clean up
+ catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName,
tableName));
+ catalog.asSchemas().dropSchema(schemaName, true);
+ metalake.deleteRole(createTableRole);
+ metalake.deleteRole(createSchemaRole);
+ }
+
+ @Test
+ void testReadWriteTable() throws InterruptedException {
+ // First, create a role for creating a database and grant role to the user
+ String readWriteRole = currentFunName();
+ SecurableObject securableObject =
+ SecurableObjects.ofMetalake(
+ metalakeName,
+ Lists.newArrayList(
+ Privileges.UseSchema.allow(),
+ Privileges.CreateSchema.allow(),
+ Privileges.CreateTable.allow(),
+ Privileges.SelectTable.allow(),
+ Privileges.ModifyTable.allow()));
+ String userName1 = System.getenv(HADOOP_USER_NAME);
+ metalake.createRole(readWriteRole, Collections.emptyMap(),
Lists.newArrayList(securableObject));
+ metalake.grantRolesToUser(Lists.newArrayList(readWriteRole), userName1);
+ waitForUpdatingPolicies();
+ // Second, create a schema
+ sparkSession.sql(SQL_CREATE_SCHEMA);
+
+ // Third, create a table
+ sparkSession.sql(SQL_USE_SCHEMA);
+ sparkSession.sql(SQL_CREATE_TABLE);
+
+ // case 1: Succeed to insert data into table
+ sparkSession.sql(SQL_INSERT_TABLE);
+
+ // case 2: Succeed to select data from the table
+ sparkSession.sql(SQL_SELECT_TABLE).collectAsList();
+
+    // case 3: Fail to update data in the table, because Hive doesn't support it.
+ Assertions.assertThrows(
+ SparkUnsupportedOperationException.class, () ->
sparkSession.sql(SQL_UPDATE_TABLE));
+
+    // case 4: Fail to delete data from the table, because Hive doesn't
support it.
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_DELETE_TABLE));
+
+ // case 5: Succeed to alter the table
+ sparkSession.sql(SQL_ALTER_TABLE);
+
+ // case 6: Fail to drop the table
+ Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_TABLE));
+
+ // Clean up
+ catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName,
tableName));
+ catalog.asSchemas().dropSchema(schemaName, true);
+ metalake.deleteRole(readWriteRole);
+ }
+
+ @Test
+ void testReadOnlyTable() throws InterruptedException {
+ // First, create a role for creating a database and grant role to the user
+ String readOnlyRole = currentFunName();
+ SecurableObject securableObject =
+ SecurableObjects.ofMetalake(
+ metalakeName,
+ Lists.newArrayList(
+ Privileges.UseSchema.allow(),
+ Privileges.CreateSchema.allow(),
+ Privileges.CreateTable.allow(),
+ Privileges.SelectTable.allow()));
+ String userName1 = System.getenv(HADOOP_USER_NAME);
+ metalake.createRole(readOnlyRole, Collections.emptyMap(),
Lists.newArrayList(securableObject));
+ metalake.grantRolesToUser(Lists.newArrayList(readOnlyRole), userName1);
+ waitForUpdatingPolicies();
+ // Second, create a schema
+ sparkSession.sql(SQL_CREATE_SCHEMA);
+
+ // Third, create a table
+ sparkSession.sql(SQL_USE_SCHEMA);
+ sparkSession.sql(SQL_CREATE_TABLE);
+
+ // case 1: Fail to insert data into table
+ Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_INSERT_TABLE));
+
+ // case 2: Succeed to select data from the table
+ sparkSession.sql(SQL_SELECT_TABLE).collectAsList();
+
+    // case 3: Fail to update data in the table, because Hive doesn't support it
+ Assertions.assertThrows(
+ SparkUnsupportedOperationException.class, () ->
sparkSession.sql(SQL_UPDATE_TABLE));
+
+ // case 4: Fail to delete data from the table
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_DELETE_TABLE));
+
+ // case 5: Fail to alter the table
+ Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_ALTER_TABLE));
+
+ // case 6: Fail to drop the table
+ Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_TABLE));
+
+ // Clean up
+ catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName,
tableName));
+ catalog.asSchemas().dropSchema(schemaName, true);
+ metalake.deleteRole(readOnlyRole);
+ }
+
+ @Test
+ void testWriteOnlyTable() throws InterruptedException {
+ // First, create a role for creating a database and grant role to the user
+ String readOnlyRole = currentFunName();
+ SecurableObject securableObject =
+ SecurableObjects.ofMetalake(
+ metalakeName,
+ Lists.newArrayList(
+ Privileges.UseSchema.allow(),
+ Privileges.CreateSchema.allow(),
+ Privileges.CreateTable.allow(),
+ Privileges.ModifyTable.allow()));
+ String userName1 = System.getenv(HADOOP_USER_NAME);
+ metalake.createRole(readOnlyRole, Collections.emptyMap(),
Lists.newArrayList(securableObject));
+ metalake.grantRolesToUser(Lists.newArrayList(readOnlyRole), userName1);
+ waitForUpdatingPolicies();
+ // Second, create a schema
+ sparkSession.sql(SQL_CREATE_SCHEMA);
+
+ // Third, create a table
+ sparkSession.sql(SQL_USE_SCHEMA);
+ sparkSession.sql(SQL_CREATE_TABLE);
+
+ // case 1: Succeed to insert data into the table
+ sparkSession.sql(SQL_INSERT_TABLE);
+
+ // case 2: Fail to select data from the table
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
+
+    // case 3: Fail to update data in the table, because Hive doesn't support it
+ Assertions.assertThrows(
+ SparkUnsupportedOperationException.class, () ->
sparkSession.sql(SQL_UPDATE_TABLE));
+
+    // case 4: Fail to delete data from the table, because Hive doesn't support it
+ Assertions.assertThrows(AnalysisException.class, () ->
sparkSession.sql(SQL_DELETE_TABLE));
+ // case 5: Succeed to alter the table
+ sparkSession.sql(SQL_ALTER_TABLE);
+
+ // case 6: Fail to drop the table
+ Assertions.assertThrows(AccessControlException.class, () ->
sparkSession.sql(SQL_DROP_TABLE));
+
+ // Clean up
+ catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName,
tableName));
+ catalog.asSchemas().dropSchema(schemaName, true);
+ metalake.deleteRole(readOnlyRole);
+ }
+
+ @Test
+ void testCreateAllPrivilegesRole() throws InterruptedException {
+ String roleName = currentFunName();
+ SecurableObject securableObject =
+ SecurableObjects.ofMetalake(
+ metalakeName,
+ Lists.newArrayList(
+ Privileges.CreateCatalog.allow(),
+ Privileges.UseCatalog.allow(),
+ Privileges.UseSchema.allow(),
+ Privileges.CreateSchema.allow(),
+ Privileges.CreateFileset.allow(),
+ Privileges.ReadFileset.allow(),
+ Privileges.WriteFileset.allow(),
+ Privileges.CreateTopic.allow(),
+ Privileges.ConsumeTopic.allow(),
+ Privileges.ProduceTopic.allow(),
+ Privileges.CreateTable.allow(),
+ Privileges.SelectTable.allow(),
+ Privileges.ModifyTable.allow(),
+ Privileges.ManageUsers.allow(),
+ Privileges.ManageGroups.allow(),
+ Privileges.CreateRole.allow()));
+ metalake.createRole(roleName, Collections.emptyMap(),
Lists.newArrayList(securableObject));
+
+ // Granted this role to the spark execution user `HADOOP_USER_NAME`
+ String userName1 = System.getenv(HADOOP_USER_NAME);
+ metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+
+ waitForUpdatingPolicies();
+
+ // Test to create a schema
+ sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    // Test to create a table
+ sparkSession.sql(SQL_USE_SCHEMA);
+ sparkSession.sql(SQL_CREATE_TABLE);
+
+ // Clean up
+ catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName,
tableName));
+ catalog.asSchemas().dropSchema(schemaName, true);
+ metalake.deleteRole(roleName);
+ }
+
+ @Test
+ void testDeleteAndRecreateRole() throws InterruptedException {
+ // Create a role with CREATE_SCHEMA privilege
+ String roleName = currentFunName();
+ SecurableObject securableObject =
+ SecurableObjects.parse(
+ String.format("%s", catalogName),
+ MetadataObject.Type.CATALOG,
+ Lists.newArrayList(Privileges.UseCatalog.allow(),
Privileges.CreateSchema.allow()));
+ metalake.createRole(roleName, Collections.emptyMap(),
Lists.newArrayList(securableObject));
+
+ // Granted this role to the spark execution user `HADOOP_USER_NAME`
+ String userName1 = System.getenv(HADOOP_USER_NAME);
+ metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+ waitForUpdatingPolicies();
+
+ // Succeed to create the schema
+ sparkSession.sql(SQL_CREATE_SCHEMA);
+ catalog.asSchemas().dropSchema(schemaName, true);
+
+ // Delete the role
+ metalake.deleteRole(roleName);
+ waitForUpdatingPolicies();
+
+ // Fail to create the schema
+ Assertions.assertThrows(
+ AccessControlException.class, () ->
sparkSession.sql(SQL_CREATE_SCHEMA));
+
+ // Create the role again
+ metalake.createRole(roleName, Collections.emptyMap(),
Lists.newArrayList(securableObject));
+
+ // Grant the role again
+ metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+ waitForUpdatingPolicies();
+
+ // Succeed to create the schema
+ sparkSession.sql(SQL_CREATE_SCHEMA);
+
+ // Clean up
+ catalog.asSchemas().dropSchema(schemaName, true);
+ metalake.deleteRole(roleName);
+ }
+
+ @Test
+ void testAllowUseSchemaPrivilege() throws InterruptedException {
// Create a role with CREATE_SCHEMA privilege
- SecurableObject securableObject1 =
+ String roleName = currentFunName();
+ SecurableObject securableObject =
SecurableObjects.parse(
String.format("%s", catalogName),
MetadataObject.Type.CATALOG,
Lists.newArrayList(Privileges.CreateSchema.allow()));
- RoleEntity role =
- RoleEntity.builder()
- .withId(1L)
- .withName(currentFunName())
- .withAuditInfo(auditInfo)
- .withSecurableObjects(Lists.newArrayList(securableObject1))
- .build();
- RangerITEnv.rangerAuthHivePlugin.onRoleCreated(role);
+ metalake.createRole(roleName, Collections.emptyMap(),
Lists.newArrayList(securableObject));
// Granted this role to the spark execution user `HADOOP_USER_NAME`
String userName1 = System.getenv(HADOOP_USER_NAME);
- UserEntity userEntity1 =
- UserEntity.builder()
- .withId(1L)
- .withName(userName1)
- .withRoleNames(Collections.emptyList())
- .withRoleIds(Collections.emptyList())
- .withAuditInfo(auditInfo)
- .build();
- Assertions.assertTrue(
- RangerITEnv.rangerAuthHivePlugin.onGrantedRolesToUser(
- Lists.newArrayList(role), userEntity1));
-
- // After Ranger Authorization, Must wait a period of time for the Ranger
Spark plugin to update
- // the policy Sleep time must be greater than the policy update interval
- // (ranger.plugin.spark.policy.pollIntervalMs) in the
- // `resources/ranger-spark-security.xml.template`
- Thread.sleep(1000L);
+ metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+ waitForUpdatingPolicies();
+
+ // create a schema use Gravitino client
+ sparkSession.sql(SQL_CREATE_SCHEMA);
+
+ // Revoke the privilege of creating schema
+ MetadataObject catalogObject =
+ MetadataObjects.of(null, catalogName, MetadataObject.Type.CATALOG);
+ metalake.revokePrivilegesFromRole(
+ roleName, catalogObject,
Lists.newArrayList(Privileges.CreateSchema.allow()));
+ waitForUpdatingPolicies();
+
+ // Use Spark to show this databases(schema)
+ Dataset dataset1 = sparkSession.sql(SQL_SHOW_DATABASES);
+ dataset1.show();
+ List<Row> rows1 = dataset1.collectAsList();
+ // The schema should not be shown, because the user does not have the
permission
+ Assertions.assertEquals(
+ 0, rows1.stream().filter(row ->
row.getString(0).equals(schemaName)).count());
+
+ // Grant the privilege of using schema
+ MetadataObject schemaObject =
+ MetadataObjects.of(catalogName, schemaName,
MetadataObject.Type.SCHEMA);
+ metalake.grantPrivilegesToRole(
+ roleName, schemaObject,
Lists.newArrayList(Privileges.UseSchema.allow()));
+ waitForUpdatingPolicies();
// Use Spark to show this databases(schema) again
Dataset dataset2 = sparkSession.sql(SQL_SHOW_DATABASES);
@@ -262,6 +580,12 @@ public class RangerHiveE2EIT extends BaseIT {
// The schema should be shown, because the user has the permission
Assertions.assertEquals(
1, rows2.stream().filter(row ->
row.getString(0).equals(schemaName)).count());
+
+ // Clean up
+ catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName,
tableName));
+ catalog.asSchemas().dropSchema(schemaName, true);
+ metalake.revokeRolesFromUser(Lists.newArrayList(roleName), userName1);
+ metalake.deleteRole(roleName);
}
private void createMetalake() {
@@ -300,21 +624,11 @@ public class RangerHiveE2EIT extends BaseIT {
LOG.info("Catalog created: {}", catalog);
}
- private static void createSchema() {
- Map<String, String> properties = Maps.newHashMap();
- properties.put("key1", "val1");
- properties.put("key2", "val2");
- properties.put(
- "location",
- String.format(
- "hdfs://%s:%d/user/hive/warehouse/%s.db",
- containerSuite.getHiveRangerContainer().getContainerIpAddress(),
- HiveContainer.HDFS_DEFAULTFS_PORT,
- schemaName.toLowerCase()));
- String comment = "comment";
-
- catalog.asSchemas().createSchema(schemaName, comment, properties);
- Schema loadSchema = catalog.asSchemas().loadSchema(schemaName);
- Assertions.assertEquals(schemaName.toLowerCase(), loadSchema.name());
+ private static void waitForUpdatingPolicies() throws InterruptedException {
+    // After Ranger authorization, we must wait a period of time for the Ranger
Spark plugin to update
+    // the policy. Sleep time must be greater than the policy update interval
+ // (ranger.plugin.spark.policy.pollIntervalMs) in the
+ // `resources/ranger-spark-security.xml.template`
+ Thread.sleep(1000L);
}
}
diff --git a/build.gradle.kts b/build.gradle.kts
index c84844906..6b272101e 100644
--- a/build.gradle.kts
+++ b/build.gradle.kts
@@ -163,7 +163,11 @@ allprojects {
// Default use MiniGravitino to run integration tests
param.environment("GRAVITINO_ROOT_DIR", project.rootDir.path)
param.environment("IT_PROJECT_DIR", project.buildDir.path)
- param.environment("HADOOP_USER_NAME", "anonymous")
+ // If the environment variable `HADOOP_USER_NAME` is not customized in
submodule,
+ // then set it to "anonymous"
+ if (param.environment["HADOOP_USER_NAME"] == null) {
+ param.environment("HADOOP_USER_NAME", "anonymous")
+ }
param.environment("HADOOP_HOME", "/tmp")
param.environment("PROJECT_VERSION", project.version)
diff --git
a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java
b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java
index 147d66eef..42dd9f830 100644
---
a/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java
+++
b/core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java
@@ -149,9 +149,11 @@ public class AuthorizationUtils {
CatalogManager catalogManager =
GravitinoEnv.getInstance().catalogManager();
for (SecurableObject securableObject : securableObjects) {
if (needApplyAuthorizationPluginAllCatalogs(securableObject)) {
- Catalog[] catalogs =
catalogManager.listCatalogsInfo(Namespace.of(metalake));
- for (Catalog catalog : catalogs) {
- callAuthorizationPluginImpl(consumer, catalog);
+ NameIdentifier[] catalogs =
catalogManager.listCatalogs(Namespace.of(metalake));
+ // ListCatalogsInfo return `CatalogInfo` instead of `BaseCatalog`, we
need `BaseCatalog` to
+ // call authorization plugin method.
+ for (NameIdentifier catalog : catalogs) {
+ callAuthorizationPluginImpl(consumer,
catalogManager.loadCatalog(catalog));
}
} else if (needApplyAuthorization(securableObject.type())) {