xunliu commented on code in PR #5321:
URL: https://github.com/apache/gravitino/pull/5321#discussion_r1824035897


##########
core/src/main/java/org/apache/gravitino/hook/TableHookDispatcher.java:
##########
@@ -96,17 +96,35 @@ public Table createTable(
   @Override
   public Table alterTable(NameIdentifier ident, TableChange... changes)
       throws NoSuchTableException, IllegalArgumentException {
-    return dispatcher.alterTable(ident, changes);
+
+    Table alteredTable = dispatcher.alterTable(ident, changes);
+    TableChange.RenameTable lastRenameChange = null;
+    for (TableChange change : changes) {
+      if (change instanceof TableChange.RenameTable) {
+        lastRenameChange = (TableChange.RenameTable) change;
+      }
+    }
+
+    if (lastRenameChange != null) {
+      AuthorizationUtils.authorizationPluginRenamePrivileges(
+          ident, Entity.EntityType.TABLE, lastRenameChange.getNewName());
+    }
+
+    return alteredTable;
   }
 
   @Override
   public boolean dropTable(NameIdentifier ident) {
-    return dispatcher.dropTable(ident);
+    boolean dropped = dispatcher.dropTable(ident);
+    AuthorizationUtils.authorizationPluginRemovePrivileges(ident, Entity.EntityType.TABLE);

Review Comment:
   Maybe we need to add a check here:
   ```
   if (dropped) {
       AuthorizationUtils.authorizationPluginRemovePrivileges(ident, Entity.EntityType.TABLE);
   }
   ```
   Other places have the same problem, too.
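
   A minimal sketch of the guarded `dropTable` (same calls as in the diff above; only the `if (dropped)` guard and returning the flag are new):
   ```
   @Override
   public boolean dropTable(NameIdentifier ident) {
     boolean dropped = dispatcher.dropTable(ident);
     // Only remove the plugin-side privileges when the table was actually dropped,
     // so a failed or no-op drop does not wipe existing policies.
     if (dropped) {
       AuthorizationUtils.authorizationPluginRemovePrivileges(ident, Entity.EntityType.TABLE);
     }
     return dropped;
   }
   ```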



##########
authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java:
##########
@@ -337,10 +350,80 @@ void testReadWriteTable() throws InterruptedException {
     // case 6: Fail to drop the table
    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_TABLE));
 
+    // case 7: If we don't have the role, we can't insert into or select from the table.
+    metalake.deleteRole(readWriteRole);
+    waitForUpdatingPolicies();
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE));
+    Assertions.assertThrows(
+        AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
+
+    // Clean up
+    catalog.asTableCatalog().dropTable(NameIdentifier.of(schemaName, tableName));
+    catalog.asSchemas().dropSchema(schemaName, true);
+  }
+
+  @Test
+  void testReadWriteTableWithTableLevelRole() throws InterruptedException {
+    // First, create a role for creating a database and grant role to the user
+    String roleName = currentFunName();
+    SecurableObject securableObject =
+        SecurableObjects.ofMetalake(
+            metalakeName,
+            Lists.newArrayList(
+                Privileges.UseSchema.allow(),
+                Privileges.CreateSchema.allow(),
+                Privileges.CreateTable.allow()));
+    String userName1 = System.getenv(HADOOP_USER_NAME);
+    metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
+    metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+    waitForUpdatingPolicies();
+    // Second, create a schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    // Third, create a table
+    sparkSession.sql(SQL_USE_SCHEMA);
+    sparkSession.sql(SQL_CREATE_TABLE);
+
+    // Fourth, revoke and grant a table level role
+    metalake.deleteRole(roleName);
+    securableObject =
+        SecurableObjects.parse(
+            String.format("%s.%s.%s", catalogName, schemaName, tableName),
+            MetadataObject.Type.TABLE,
+            Lists.newArrayList(Privileges.ModifyTable.allow(), Privileges.SelectTable.allow()));
+    metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
+    metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+    waitForUpdatingPolicies();
+
+    // case 1: Succeed to insert data into table
+    sparkSession.sql(SQL_INSERT_TABLE);
+
+    // case 2: Succeed to select data from the table
+    sparkSession.sql(SQL_SELECT_TABLE).collectAsList();
+
+    // case 3: Fail to update data in the table, because Hive doesn't support it.
+    Assertions.assertThrows(
+        SparkUnsupportedOperationException.class, () -> sparkSession.sql(SQL_UPDATE_TABLE));
+
+    // case 4: Fail to delete data from the table, because Hive doesn't support it.
+    Assertions.assertThrows(AnalysisException.class, () -> sparkSession.sql(SQL_DELETE_TABLE));
+
+    // case 5: Succeed to alter the table
+    sparkSession.sql(SQL_ALTER_TABLE);
+
+    // case 6: Fail to drop the table
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_TABLE));
+
+    // case 7: If we don't have the role, we can't insert into or select from the table.
+    metalake.deleteRole(roleName);
+    waitForUpdatingPolicies();
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE));
+    Assertions.assertThrows(
+        AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
+

Review Comment:
   I think it would be better to check all SQL operations (see the sketch after this list):
   1. SQL_CREATE_SCHEMA
   2. SQL_USE_SCHEMA
   3. SQL_CREATE_TABLE
   4. SQL_INSERT_TABLE
   5. SQL_SELECT_TABLE
   6. SQL_UPDATE_TABLE
   7. SQL_DELETE_TABLE
   8. SQL_DROP_TABLE
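
   For example, a rough sketch of what case 7 could look like after the role is deleted (using the `SQL_*` constants this test already defines; the exact failure for statements like USE/UPDATE/DELETE, or for re-creating objects that still exist at that point, would need checking per statement):
   ```
   metalake.deleteRole(roleName);
   waitForUpdatingPolicies();
   Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_SCHEMA));
   Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_CREATE_TABLE));
   Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE));
   Assertions.assertThrows(
       AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList());
   Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_TABLE));
   ```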



##########
core/src/main/java/org/apache/gravitino/hook/CatalogHookDispatcher.java:
##########
@@ -104,17 +104,30 @@ public Catalog createCatalog(
   @Override
   public Catalog alterCatalog(NameIdentifier ident, CatalogChange... changes)
       throws NoSuchCatalogException, IllegalArgumentException {
-    return dispatcher.alterCatalog(ident, changes);
+    Catalog alteredCatalog = dispatcher.alterCatalog(ident, changes);
+    CatalogChange.RenameCatalog lastRenameChange = null;
+    for (CatalogChange change : changes) {
+      if (change instanceof CatalogChange.RenameCatalog) {
+        lastRenameChange = (CatalogChange.RenameCatalog) change;
+      }
+    }
+    if (lastRenameChange != null) {
+      AuthorizationUtils.authorizationPluginRenamePrivileges(
+          ident, Entity.EntityType.CATALOG, lastRenameChange.getNewName());
+    }
+    return alteredCatalog;
   }
 
   @Override
   public boolean dropCatalog(NameIdentifier ident) {
+    AuthorizationUtils.authorizationPluginRemovePrivileges(ident, Entity.EntityType.CATALOG);
     return dispatcher.dropCatalog(ident);

Review Comment:
   I think maybe we could call `dropCatalog(ident, /*default value*/)` here, to avoid duplicating the `return dispatcher.dropCatalog(ident);` call?
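
   A minimal sketch of that delegation (assuming the dispatcher already exposes a two-argument `dropCatalog(NameIdentifier, boolean)` overload where the privilege cleanup would then live; the `false` default is only a placeholder):
   ```
   @Override
   public boolean dropCatalog(NameIdentifier ident) {
     // Delegate to the overload so the authorization-plugin cleanup is implemented in one place.
     return dropCatalog(ident, false /* assumed default value */);
   }
   ```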



##########
authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java:
##########
@@ -337,10 +350,80 @@ void testReadWriteTable() throws InterruptedException {
     // case 6: Fail to drop the table
    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_TABLE));
 
+    // case 7: If we don't have the role, we can't insert into or select from the table.
+    metalake.deleteRole(readWriteRole);
+    waitForUpdatingPolicies();
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_INSERT_TABLE));
+    Assertions.assertThrows(
+        AccessControlException.class, () -> sparkSession.sql(SQL_SELECT_TABLE).collectAsList());

Review Comment:
   I think it would be better to check all SQL operations here as well (see the sketch after this list):
   1. SQL_CREATE_SCHEMA
   2. SQL_USE_SCHEMA
   3. SQL_CREATE_TABLE
   4. SQL_INSERT_TABLE
   5. SQL_SELECT_TABLE
   6. SQL_UPDATE_TABLE
   7. SQL_DELETE_TABLE
   8. SQL_DROP_TABLE
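
   As in the comment above, a small local helper could keep that readable (hypothetical name `assertSqlDenied`, assuming `sparkSession` is the static session field this test already uses):
   ```
   private static void assertSqlDenied(String sql) {
     // Once readWriteRole is deleted, Ranger should reject the statement.
     Assertions.assertThrows(
         AccessControlException.class, () -> sparkSession.sql(sql).collectAsList());
   }
   ```
   Case 7 could then call it for each of the `SQL_*` statements listed above, keeping in mind that UPDATE and DELETE already fail on Hive for unrelated reasons.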



##########
authorizations/authorization-ranger/src/test/java/org/apache/gravitino/authorization/ranger/integration/test/RangerHiveE2EIT.java:
##########
@@ -527,6 +625,271 @@ void testDeleteAndRecreateRole() throws InterruptedException {
     metalake.deleteRole(roleName);
   }
 
+  @Test
+  void testDeleteAndRecreateMetadataObject() throws InterruptedException {
+    // Create a role with CREATE_SCHEMA privilege
+    String roleName = currentFunName();
+    SecurableObject securableObject =
+        SecurableObjects.parse(
+            String.format("%s", catalogName),
+            MetadataObject.Type.CATALOG,
+            Lists.newArrayList(Privileges.CreateSchema.allow()));
+    metalake.createRole(roleName, Collections.emptyMap(), Lists.newArrayList(securableObject));
+
+    // Grant this role to the Spark execution user `HADOOP_USER_NAME`
+    String userName1 = System.getenv(HADOOP_USER_NAME);
+    metalake.grantRolesToUser(Lists.newArrayList(roleName), userName1);
+    waitForUpdatingPolicies();
+
+    // Create a schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_SCHEMA));
+
+    // Set owner
+    MetadataObject schemaObject =
+        MetadataObjects.of(catalogName, schemaName, MetadataObject.Type.SCHEMA);
+    metalake.setOwner(schemaObject, userName1, Owner.Type.USER);
+    waitForUpdatingPolicies();
+
+    // Delete a schema
+    sparkSession.sql(SQL_DROP_SCHEMA);
+    catalog.asSchemas().dropSchema(schemaName, true);
+    waitForUpdatingPolicies();
+
+    // Recreate a schema
+    sparkSession.sql(SQL_CREATE_SCHEMA);
+
+    Assertions.assertThrows(AccessControlException.class, () -> sparkSession.sql(SQL_DROP_SCHEMA));
+
+    // Set owner
+    schemaObject = MetadataObjects.of(catalogName, schemaName, MetadataObject.Type.SCHEMA);
+    metalake.setOwner(schemaObject, userName1, Owner.Type.USER);
+    waitForUpdatingPolicies();
+    sparkSession.sql(SQL_DROP_SCHEMA);
+
+    // Delete the role and fail to create schema
+    metalake.deleteRole(roleName);
+    waitForUpdatingPolicies();
+    ;

Review Comment:
   Please remove this `;`


