xunliu commented on code in PR #6100:
URL: https://github.com/apache/gravitino/pull/6100#discussion_r1905195160


##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,281 @@ public List<String> policyResourceDefinesRule() {
     return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
   }
 
+  String getAuthorizationPath(PathBasedMetadataObject pathBasedMetadataObject) 
{
+    return HDFS_PATTERN.matcher(pathBasedMetadataObject.path()).replaceAll("");
+  }
+
+  /**
+   * Find the managed policy for the ranger securable object.
+   *
+   * @param authzMetadataObject The ranger securable object to find the 
managed policy.
+   * @return The managed policy for the metadata object.
+   */
+  @Override
+  public RangerPolicy findManagedPolicy(AuthorizationMetadataObject 
authzMetadataObject)
+      throws AuthorizationPluginException {
+    List<String> nsMetadataObj = authzMetadataObject.names();
+    PathBasedMetadataObject pathAuthzMetadataObject = 
(PathBasedMetadataObject) authzMetadataObject;
+    Map<String, String> preciseFilters = new HashMap<>();
+    for (int i = 0; i < nsMetadataObj.size() && i < 
policyResourceDefinesRule().size(); i++) {
+      preciseFilters.put(
+          policyResourceDefinesRule().get(i), 
getAuthorizationPath(pathAuthzMetadataObject));
+    }
+    return preciseFindPolicy(authzMetadataObject, preciseFilters);
+  }
+
+  @Override
+  /** Wildcard search the Ranger policies in the different Ranger service. */
+  protected List<RangerPolicy> wildcardSearchPolies(
+      AuthorizationMetadataObject authzMetadataObject) {
+    Preconditions.checkArgument(authzMetadataObject instanceof 
PathBasedMetadataObject);
+    PathBasedMetadataObject pathBasedMetadataObject = 
(PathBasedMetadataObject) authzMetadataObject;
+    List<String> resourceDefines = policyResourceDefinesRule();
+    Map<String, String> searchFilters = new HashMap<>();
+    searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+    resourceDefines.stream()
+        .forEach(
+            resourceDefine -> {
+              searchFilters.put(
+                  SearchFilter.RESOURCE_PREFIX + resourceDefine,
+                  getAuthorizationPath(pathBasedMetadataObject));
+            });
+    try {
+      return rangerClient.findPolicies(searchFilters);
+    } catch (RangerServiceException e) {
+      throw new AuthorizationPluginException(e, "Failed to find the policies 
in the Ranger");
+    }
+  }
+
+  /**
+   * If rename the SCHEMA, Need to rename these the relevant policies, 
`{schema}`, `{schema}.*`,
+   * `{schema}.*.*` <br>
+   * If rename the TABLE, Need to rename these the relevant policies, 
`{schema}.*`, `{schema}.*.*`
+   * <br>
+   */
+  @Override
+  protected void doRenameMetadataObject(
+      AuthorizationMetadataObject authzMetadataObject,
+      AuthorizationMetadataObject newAuthzMetadataObject) {
+    Preconditions.checkArgument(
+        authzMetadataObject instanceof PathBasedMetadataObject,
+        "The metadata object must be a PathBasedMetadataObject");
+    Preconditions.checkArgument(
+        newAuthzMetadataObject instanceof PathBasedMetadataObject,
+        "The metadata object must be a PathBasedMetadataObject");
+    updatePolicyByMetadataObject(
+        newAuthzMetadataObject.type().metadataObjectType(),
+        authzMetadataObject,
+        newAuthzMetadataObject);
+  }
+
+  @Override
+  protected void updatePolicyByMetadataObject(
+      MetadataObject.Type operationType,
+      AuthorizationMetadataObject oldAuthzMetaObject,
+      AuthorizationMetadataObject newAuthzMetaObject) {
+    PathBasedMetadataObject newPathBasedMetadataObject =
+        (PathBasedMetadataObject) newAuthzMetaObject;
+    List<RangerPolicy> oldPolicies = wildcardSearchPolies(oldAuthzMetaObject);
+    List<RangerPolicy> existNewPolicies = 
wildcardSearchPolies(newAuthzMetaObject);
+    if (oldPolicies.isEmpty()) {
+      LOG.warn("Cannot find the Ranger policy for the metadata object({})!", 
oldAuthzMetaObject);
+      return;
+    }
+    if (!existNewPolicies.isEmpty()) {
+      LOG.warn("The Ranger policy for the metadata object({}) already 
exists!", newAuthzMetaObject);
+    }
+    oldPolicies.stream()
+        .forEach(
+            policy -> {
+              try {
+                // Update the policy name is following Gravitino's spec
+                
policy.setName(getAuthorizationPath(newPathBasedMetadataObject));
+                // Update the policy resource name to new name
+                policy
+                    .getResources()
+                    .put(
+                        rangerHelper.policyResourceDefines.get(0),
+                        new RangerPolicy.RangerPolicyResource(
+                            getAuthorizationPath(newPathBasedMetadataObject)));
+
+                boolean alreadyExist =
+                    existNewPolicies.stream()
+                        .anyMatch(
+                            existNewPolicy ->
+                                
existNewPolicy.getName().equals(policy.getName())
+                                    || 
existNewPolicy.getResources().equals(policy.getResources()));
+                if (alreadyExist) {
+                  LOG.warn(
+                      "The Ranger policy for the metadata object({}) already 
exists!",
+                      newAuthzMetaObject);
+                  return;
+                }
+
+                // Update the policy
+                rangerClient.updatePolicy(policy.getId(), policy);
+              } catch (RangerServiceException e) {
+                LOG.error("Failed to rename the policy {}!", policy);
+                throw new RuntimeException(e);
+              }
+            });
+  }
+
+  /**
+   * If remove the SCHEMA, need to remove these the relevant policies, 
`{schema}`, `{schema}.*`,
+   * `{schema}.*.*` <br>
+   * If remove the TABLE, need to remove these the relevant policies, 
`{schema}.*`, `{schema}.*.*`
+   * <br>
+   * If remove the COLUMN, Only need to remove `{schema}.*.*` <br>
+   */
+  @Override
+  protected void doRemoveMetadataObject(AuthorizationMetadataObject 
authzMetadataObject) {
+    if (authzMetadataObject.type().equals(SCHEMA)) {
+      doRemoveSchemaMetadataObject(authzMetadataObject);
+    } else if (authzMetadataObject.type().equals(TABLE)) {
+      doRemoveTableMetadataObject(authzMetadataObject);
+    } else if (authzMetadataObject.type().equals(COLUMN)
+        || authzMetadataObject.type().equals(PATH)) {
+      removePolicyByMetadataObject(authzMetadataObject);

Review Comment:
   OK, I have unified the function name.



##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,372 @@ public List<String> policyResourceDefinesRule() {
     return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
   }
 
+  /**
+   * Find the managed policy for the ranger securable object.
+   *
+   * @param authzMetadataObject The ranger securable object to find the 
managed policy.
+   * @return The managed policy for the metadata object.
+   */
+  public RangerPolicy findManagedPolicy(AuthorizationMetadataObject 
authzMetadataObject)
+      throws AuthorizationPluginException {
+    List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+    if (!policies.isEmpty()) {
+      /**
+       * Because Ranger doesn't support the precise search, Ranger will return 
the policy meets the
+       * wildcard(*,?) conditions, If you use `/a/b` condition to search 
policy, the Ranger will
+       * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely 
filter this research
+       * results.
+       */
+      List<String> nsMetadataObj = authzMetadataObject.names();
+      PathBasedMetadataObject pathAuthzMetadataObject =
+          (PathBasedMetadataObject) authzMetadataObject;
+      Map<String, String> preciseFilters = new HashMap<>();
+      for (int i = 0; i < nsMetadataObj.size() && i < 
policyResourceDefinesRule().size(); i++) {
+        preciseFilters.put(policyResourceDefinesRule().get(i), 
pathAuthzMetadataObject.path());
+      }
+      policies =
+          policies.stream()
+              .filter(
+                  policy ->
+                      policy.getResources().entrySet().stream()
+                          .allMatch(
+                              entry ->
+                                  preciseFilters.containsKey(entry.getKey())
+                                      && entry.getValue().getValues().size() 
== 1
+                                      && entry
+                                          .getValue()
+                                          .getValues()
+                                          
.contains(preciseFilters.get(entry.getKey()))))
+              .collect(Collectors.toList());
+    }
+    // Only return the policies that are managed by Gravitino.
+    if (policies.size() > 1) {
+      throw new AuthorizationPluginException("Each metadata object can have at 
most one policy.");
+    }
+
+    if (policies.isEmpty()) {
+      return null;
+    }
+
+    RangerPolicy policy = policies.get(0);
+    // Delegating Gravitino management policies cannot contain duplicate 
privilege
+    policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+    policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+    
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+    
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+    return policy;
+  }
+
+  @Override
+  /** Wildcard search the Ranger policies in the different Ranger service. */
+  protected List<RangerPolicy> wildcardSearchPolies(
+      AuthorizationMetadataObject authzMetadataObject) {
+    Preconditions.checkArgument(authzMetadataObject instanceof 
PathBasedMetadataObject);
+    PathBasedMetadataObject pathBasedMetadataObject = 
(PathBasedMetadataObject) authzMetadataObject;
+    List<String> resourceDefines = policyResourceDefinesRule();
+    Map<String, String> searchFilters = new HashMap<>();
+    searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+    resourceDefines.stream()
+        .forEach(
+            resourceDefine -> {
+              searchFilters.put(
+                  SearchFilter.RESOURCE_PREFIX + resourceDefine, 
pathBasedMetadataObject.path());
+            });
+    try {
+      List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+      return policies;
+    } catch (RangerServiceException e) {
+      throw new AuthorizationPluginException(e, "Failed to find the policies 
in the Ranger");
+    }
+  }
+
+  /**
+   * IF rename the SCHEMA, Need to rename these the relevant policies, 
`{schema}`, `{schema}.*`,
+   * `{schema}.*.*` <br>
+   * IF rename the TABLE, Need to rename these the relevant policies, 
`{schema}.*`, `{schema}.*.*`
+   * <br>
+   */
+  @Override
+  protected void doRenameMetadataObject(
+      AuthorizationMetadataObject authzMetadataObject,
+      AuthorizationMetadataObject newAuthzMetadataObject) {
+    List<Map<String, String>> loop;
+    if (newAuthzMetadataObject.type().equals(SCHEMA)) {
+      loop =
+          ImmutableList.of(
+              ImmutableMap.of(
+                  authzMetadataObject.names().get(0), 
newAuthzMetadataObject.names().get(0)),
+              ImmutableMap.of(RangerHelper.RESOURCE_ALL, 
RangerHelper.RESOURCE_ALL),
+              ImmutableMap.of(RangerHelper.RESOURCE_ALL, 
RangerHelper.RESOURCE_ALL));
+    } else if (newAuthzMetadataObject.type().equals(TABLE)) {
+      loop =
+          ImmutableList.of(
+              ImmutableMap.of(
+                  authzMetadataObject.names().get(0), 
newAuthzMetadataObject.names().get(0)),
+              ImmutableMap.of(
+                  authzMetadataObject.names().get(1), 
newAuthzMetadataObject.names().get(1)),
+              ImmutableMap.of(RangerHelper.RESOURCE_ALL, 
RangerHelper.RESOURCE_ALL));
+    } else if (newAuthzMetadataObject.type().equals(COLUMN)) {
+      loop =
+          ImmutableList.of(
+              ImmutableMap.of(
+                  authzMetadataObject.names().get(0), 
newAuthzMetadataObject.names().get(0)),
+              ImmutableMap.of(
+                  authzMetadataObject.names().get(1), 
newAuthzMetadataObject.names().get(1)),
+              ImmutableMap.of(
+                  authzMetadataObject.names().get(2), 
newAuthzMetadataObject.names().get(2)));
+    } else if (newAuthzMetadataObject.type().equals(PATH)) {
+      // do nothing when fileset is renamed
+      return;
+    } else {
+      throw new IllegalArgumentException(
+          "Unsupported metadata object type: " + authzMetadataObject.type());
+    }
+
+    List<String> oldMetadataNames = new ArrayList<>();
+    List<String> newMetadataNames = new ArrayList<>();
+    for (int index = 0; index < loop.size(); index++) {
+      
oldMetadataNames.add(loop.get(index).keySet().stream().findFirst().get());
+      
newMetadataNames.add(loop.get(index).values().stream().findFirst().get());
+
+      AuthorizationMetadataObject.Type type =
+          (index == 0
+              ? RangerHadoopSQLMetadataObject.Type.SCHEMA
+              : (index == 1
+                  ? RangerHadoopSQLMetadataObject.Type.TABLE
+                  : RangerHadoopSQLMetadataObject.Type.COLUMN));
+      AuthorizationMetadataObject oldHadoopSQLMetadataObject =
+          new RangerHadoopSQLMetadataObject(
+              AuthorizationMetadataObject.getParentFullName(oldMetadataNames),
+              AuthorizationMetadataObject.getLastName(oldMetadataNames),
+              type);
+      AuthorizationMetadataObject newHadoopSQLMetadataObject =
+          new RangerHadoopSQLMetadataObject(
+              AuthorizationMetadataObject.getParentFullName(newMetadataNames),
+              AuthorizationMetadataObject.getLastName(newMetadataNames),
+              type);
+      updatePolicyByMetadataObject(
+          MetadataObject.Type.SCHEMA, oldHadoopSQLMetadataObject, 
newHadoopSQLMetadataObject);
+    }
+  }
+
+  @Override
+  protected void updatePolicyByMetadataObject(
+      MetadataObject.Type operationType,
+      AuthorizationMetadataObject oldAuthzMetaobject,
+      AuthorizationMetadataObject newAuthzMetaobject) {
+    List<RangerPolicy> oldPolicies = wildcardSearchPolies(oldAuthzMetaobject);
+    List<RangerPolicy> existNewPolicies = 
wildcardSearchPolies(newAuthzMetaobject);
+    if (oldPolicies.isEmpty()) {
+      LOG.warn("Cannot find the Ranger policy for the metadata object({})!", 
oldAuthzMetaobject);
+    }
+    if (!existNewPolicies.isEmpty()) {
+      LOG.warn("The Ranger policy for the metadata object({}) already 
exists!", newAuthzMetaobject);
+    }
+    Map<MetadataObject.Type, Integer> operationTypeIndex =
+        ImmutableMap.of(
+            MetadataObject.Type.SCHEMA, 0,
+            MetadataObject.Type.TABLE, 1,
+            MetadataObject.Type.COLUMN, 2);
+    oldPolicies.stream()
+        .forEach(
+            policy -> {
+              try {
+                String policyName = policy.getName();
+                int index = operationTypeIndex.get(operationType);
+
+                // Update the policy name is following Gravitino's spec
+                if (policy
+                    .getName()
+                    .equals(
+                        
AuthorizationSecurableObject.DOT_JOINER.join(oldAuthzMetaobject.names()))) {
+                  List<String> policyNames =
+                      Lists.newArrayList(
+                          
AuthorizationSecurableObject.DOT_SPLITTER.splitToList(policyName));
+                  Preconditions.checkArgument(
+                      policyNames.size() >= oldAuthzMetaobject.names().size(),
+                      String.format("The policy name(%s) is invalid!", 
policyName));
+                  if 
(policyNames.get(index).equals(RangerHelper.RESOURCE_ALL)) {
+                    // Doesn't need to rename the policy `*`
+                    return;
+                  }
+                  policyNames.set(index, 
newAuthzMetaobject.names().get(index));
+                  
policy.setName(AuthorizationSecurableObject.DOT_JOINER.join(policyNames));
+                }
+                // Update the policy resource name to new name
+                policy
+                    .getResources()
+                    .put(
+                        rangerHelper.policyResourceDefines.get(index),
+                        new RangerPolicy.RangerPolicyResource(
+                            newAuthzMetaobject.names().get(index)));
+
+                boolean alreadyExist =
+                    existNewPolicies.stream()
+                        .anyMatch(
+                            existNewPolicy ->
+                                
existNewPolicy.getName().equals(policy.getName())
+                                    || 
existNewPolicy.getResources().equals(policy.getResources()));
+                if (alreadyExist) {
+                  LOG.warn(
+                      "The Ranger policy for the metadata object({}) already 
exists!",
+                      newAuthzMetaobject);
+                  return;
+                }
+
+                // Update the policy
+                rangerClient.updatePolicy(policy.getId(), policy);
+              } catch (RangerServiceException e) {
+                LOG.error("Failed to rename the policy {}!", policy);
+                throw new RuntimeException(e);
+              }
+            });
+  }
+
+  /**
+   * IF remove the SCHEMA, need to remove these the relevant policies, 
`{schema}`, `{schema}.*`,
+   * `{schema}.*.*` <br>
+   * IF remove the TABLE, need to remove these the relevant policies, 
`{schema}.*`, `{schema}.*.*`
+   * <br>
+   * IF remove the COLUMN, Only need to remove `{schema}.*.*` <br>
+   */
+  @Override
+  protected void doRemoveMetadataObject(AuthorizationMetadataObject 
authzMetadataObject) {
+    if (authzMetadataObject.type().equals(SCHEMA)) {
+      doRemoveSchemaMetadataObject(authzMetadataObject);
+    } else if (authzMetadataObject.type().equals(TABLE)) {
+      doRemoveTableMetadataObject(authzMetadataObject);
+    } else if (authzMetadataObject.type().equals(PATH)) {
+      removePolicyByMetadataObject(authzMetadataObject);
+    } else {
+      throw new IllegalArgumentException(
+          "Unsupported authorization metadata object type: " + 
authzMetadataObject.type());
+    }
+  }
+
+  /**
+   * Remove the SCHEMA, Need to remove these the relevant policies, 
`{schema}`, `{schema}.*`,
+   * `{schema}.*.*` permissions.
+   */
+  private void doRemoveSchemaMetadataObject(AuthorizationMetadataObject 
authzMetadataObject) {
+    Preconditions.checkArgument(
+        authzMetadataObject instanceof PathBasedMetadataObject,
+        "The metadata object must be a PathBasedMetadataObject");
+    Preconditions.checkArgument(
+        authzMetadataObject.type() == SCHEMA, "The metadata object type must 
be SCHEMA");
+    Preconditions.checkArgument(
+        authzMetadataObject.names().size() == 1, "The metadata object names 
must be 1");
+    if (RangerHelper.RESOURCE_ALL.equals(authzMetadataObject.name())) {
+      // Remove all schema in this catalog
+      NameIdentifier[] catalogs =
+          
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+      Arrays.asList(catalogs).stream()
+          .forEach(
+              catalog -> {
+                List<String> catalogLocations =
+                    AuthorizationUtils.getMetadataObjectLocation(
+                        NameIdentifier.of(catalog.name()), 
Entity.EntityType.CATALOG);
+                catalogLocations.stream()
+                    .forEach(
+                        locationPath -> {
+                          AuthorizationMetadataObject catalogMetadataObject =
+                              new PathBasedMetadataObject(
+                                  metalake, catalog.name(), locationPath, 
PATH);
+                          doRemoveSchemaMetadataObject(catalogMetadataObject);
+                        });
+              });
+    } else {
+      // Remove all table in this schema
+      NameIdentifier[] tables =
+          GravitinoEnv.getInstance()
+              .tableDispatcher()
+              .listTables(Namespace.of(authzMetadataObject.name()));
+      Arrays.asList(tables).stream()
+          .forEach(
+              table -> {
+                NameIdentifier identifier =
+                    NameIdentifier.of(authzMetadataObject.name(), 
table.name());
+                List<String> tabLocations =
+                    AuthorizationUtils.getMetadataObjectLocation(
+                        identifier, Entity.EntityType.TABLE);
+                tabLocations.stream()
+                    .forEach(
+                        locationPath -> {
+                          AuthorizationMetadataObject tableMetadataObject =
+                              new PathBasedMetadataObject(
+                                  authzMetadataObject.name(), table.name(), 
locationPath, PATH);
+                          doRemoveTableMetadataObject(tableMetadataObject);
+                        });
+                // Remove schema
+                Schema schema =
+                    GravitinoEnv.getInstance()
+                        .schemaDispatcher()
+                        
.loadSchema(NameIdentifier.of(authzMetadataObject.name()));
+                List<String> schemaLocations =
+                    AuthorizationUtils.getMetadataObjectLocation(
+                        identifier, Entity.EntityType.SCHEMA);
+                schemaLocations.stream()
+                    .forEach(
+                        locationPath -> {
+                          AuthorizationMetadataObject schemaMetadataObject =
+                              new PathBasedMetadataObject(
+                                  authzMetadataObject.name(), schema.name(), 
locationPath, PATH);
+                          removePolicyByMetadataObject(schemaMetadataObject);
+                        });
+              });

Review Comment:
   I already loop through and delete all the tables under this schema.
   ```
         // Remove all table in this schema
         NameIdentifier[] tables =
             GravitinoEnv.getInstance()
                 .tableDispatcher()
                 .listTables(Namespace.of(authzMetadataObject.name()));
   ```



##########
core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java:
##########
@@ -364,4 +376,127 @@ private static void checkCatalogType(
           catalogIdent, catalog.type(), privilege);
     }
   }
+
+  public static List<String> getMetadataObjectLocation(
+      NameIdentifier ident, Entity.EntityType type) {
+    List<String> locations = new ArrayList<>();
+    MetadataObject metadataObject;
+    try {
+      metadataObject = NameIdentifierUtil.toMetadataObject(ident, type);
+    } catch (IllegalArgumentException e) {
+      LOG.warn("Illegal argument exception for metadata object %s type %s", 
ident, type, e);
+      return locations;
+    }
+
+    String metalake =
+        (type == Entity.EntityType.METALAKE ? ident.name() : 
ident.namespace().level(0));
+    try {
+      switch (metadataObject.type()) {
+        case METALAKE:
+          {
+            NameIdentifier[] identifiers =
+                
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+            Arrays.stream(identifiers)
+                .collect(Collectors.toList())
+                .forEach(
+                    identifier -> {
+                      Catalog catalogObj =
+                          
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(identifier);
+                      if (catalogObj.provider().equals("hive")) {
+                        Schema schema =
+                            GravitinoEnv.getInstance()
+                                .schemaDispatcher()
+                                .loadSchema(
+                                    NameIdentifier.of(
+                                        metalake,
+                                        catalogObj.name(),
+                                        "default" /*Hive default schema*/));
+                        if 
(schema.properties().containsKey(HiveConstants.LOCATION)) {
+                          String defaultSchemaLocation =
+                              schema.properties().get(HiveConstants.LOCATION);
+                          if (defaultSchemaLocation != null && 
!defaultSchemaLocation.isEmpty()) {
+                            locations.add(defaultSchemaLocation);
+                          } else {
+                            LOG.warn("Catalog %s location is not found", 
ident);
+                          }
+                        }
+                      }
+                    });
+          }
+          break;
+        case CATALOG:
+          {
+            Catalog catalogObj = 
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(ident);
+            if (catalogObj.provider().equals("hive")) {

Review Comment:
   DONE



##########
core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java:
##########
@@ -364,4 +376,127 @@ private static void checkCatalogType(
           catalogIdent, catalog.type(), privilege);
     }
   }
+
+  public static List<String> getMetadataObjectLocation(
+      NameIdentifier ident, Entity.EntityType type) {
+    List<String> locations = new ArrayList<>();
+    MetadataObject metadataObject;
+    try {
+      metadataObject = NameIdentifierUtil.toMetadataObject(ident, type);
+    } catch (IllegalArgumentException e) {
+      LOG.warn("Illegal argument exception for metadata object %s type %s", 
ident, type, e);
+      return locations;
+    }
+
+    String metalake =
+        (type == Entity.EntityType.METALAKE ? ident.name() : 
ident.namespace().level(0));
+    try {
+      switch (metadataObject.type()) {
+        case METALAKE:
+          {
+            NameIdentifier[] identifiers =
+                
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+            Arrays.stream(identifiers)
+                .collect(Collectors.toList())
+                .forEach(
+                    identifier -> {
+                      Catalog catalogObj =
+                          
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(identifier);
+                      if (catalogObj.provider().equals("hive")) {
+                        Schema schema =
+                            GravitinoEnv.getInstance()
+                                .schemaDispatcher()
+                                .loadSchema(
+                                    NameIdentifier.of(
+                                        metalake,
+                                        catalogObj.name(),
+                                        "default" /*Hive default schema*/));
+                        if 
(schema.properties().containsKey(HiveConstants.LOCATION)) {
+                          String defaultSchemaLocation =
+                              schema.properties().get(HiveConstants.LOCATION);
+                          if (defaultSchemaLocation != null && 
!defaultSchemaLocation.isEmpty()) {
+                            locations.add(defaultSchemaLocation);
+                          } else {
+                            LOG.warn("Catalog %s location is not found", 
ident);
+                          }
+                        }
+                      }
+                    });
+          }
+          break;
+        case CATALOG:
+          {
+            Catalog catalogObj = 
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(ident);
+            if (catalogObj.provider().equals("hive")) {
+              Schema schema =
+                  GravitinoEnv.getInstance()
+                      .schemaDispatcher()
+                      .loadSchema(
+                          NameIdentifier.of(
+                              metalake, catalogObj.name(), "default" /*Hive 
default schema*/));
+              if (schema.properties().containsKey(HiveConstants.LOCATION)) {
+                String defaultSchemaLocation = 
schema.properties().get(HiveConstants.LOCATION);
+                if (defaultSchemaLocation != null && 
!defaultSchemaLocation.isEmpty()) {
+                  locations.add(defaultSchemaLocation);
+                } else {
+                  LOG.warn("Catalog %s location is not found", ident);
+                }
+              }
+            }
+          }
+          break;
+        case SCHEMA:
+          {
+            Schema schema = 
GravitinoEnv.getInstance().schemaDispatcher().loadSchema(ident);
+            if (schema.properties().containsKey(HiveConstants.LOCATION)) {
+              String schemaLocation = 
schema.properties().get(HiveConstants.LOCATION);

Review Comment:
   This PR only supports Hive authorization. I created an issue #6133 to track 
it.



##########
core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java:
##########
@@ -364,4 +376,127 @@ private static void checkCatalogType(
           catalogIdent, catalog.type(), privilege);
     }
   }
+
+  public static List<String> getMetadataObjectLocation(
+      NameIdentifier ident, Entity.EntityType type) {
+    List<String> locations = new ArrayList<>();
+    MetadataObject metadataObject;
+    try {
+      metadataObject = NameIdentifierUtil.toMetadataObject(ident, type);
+    } catch (IllegalArgumentException e) {
+      LOG.warn("Illegal argument exception for metadata object %s type %s", 
ident, type, e);
+      return locations;
+    }
+
+    String metalake =
+        (type == Entity.EntityType.METALAKE ? ident.name() : 
ident.namespace().level(0));
+    try {
+      switch (metadataObject.type()) {
+        case METALAKE:
+          {
+            NameIdentifier[] identifiers =
+                
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+            Arrays.stream(identifiers)
+                .collect(Collectors.toList())
+                .forEach(
+                    identifier -> {
+                      Catalog catalogObj =
+                          
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(identifier);
+                      if (catalogObj.provider().equals("hive")) {
+                        Schema schema =
+                            GravitinoEnv.getInstance()
+                                .schemaDispatcher()
+                                .loadSchema(
+                                    NameIdentifier.of(
+                                        metalake,
+                                        catalogObj.name(),
+                                        "default" /*Hive default schema*/));
+                        if 
(schema.properties().containsKey(HiveConstants.LOCATION)) {
+                          String defaultSchemaLocation =
+                              schema.properties().get(HiveConstants.LOCATION);
+                          if (defaultSchemaLocation != null && 
!defaultSchemaLocation.isEmpty()) {
+                            locations.add(defaultSchemaLocation);
+                          } else {
+                            LOG.warn("Catalog %s location is not found", 
ident);
+                          }
+                        }
+                      }
+                    });
+          }
+          break;
+        case CATALOG:
+          {
+            Catalog catalogObj = 
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(ident);
+            if (catalogObj.provider().equals("hive")) {
+              Schema schema =
+                  GravitinoEnv.getInstance()
+                      .schemaDispatcher()
+                      .loadSchema(
+                          NameIdentifier.of(
+                              metalake, catalogObj.name(), "default" /*Hive 
default schema*/));
+              if (schema.properties().containsKey(HiveConstants.LOCATION)) {
+                String defaultSchemaLocation = 
schema.properties().get(HiveConstants.LOCATION);
+                if (defaultSchemaLocation != null && 
!defaultSchemaLocation.isEmpty()) {
+                  locations.add(defaultSchemaLocation);
+                } else {
+                  LOG.warn("Catalog %s location is not found", ident);
+                }
+              }
+            }
+          }
+          break;
+        case SCHEMA:
+          {
+            Schema schema = 
GravitinoEnv.getInstance().schemaDispatcher().loadSchema(ident);
+            if (schema.properties().containsKey(HiveConstants.LOCATION)) {
+              String schemaLocation = 
schema.properties().get(HiveConstants.LOCATION);
+              if (schemaLocation != null && schemaLocation.isEmpty()) {
+                locations.add(schemaLocation);
+              } else {
+                LOG.warn("Schema %s location is not found", ident);
+              }
+            }
+          }
+          break;
+        case TABLE:
+          {
+            Table table = 
GravitinoEnv.getInstance().tableDispatcher().loadTable(ident);

Review Comment:
   Currently, Gravitino does not provide a way to get the TableOperationDispatcher from 
GravitinoEnv.



##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -183,36 +540,66 @@ public List<AuthorizationSecurableObject> 
translatePrivilege(SecurableObject sec
                   // in the RangerAuthorizationHDFSPlugin.
                   break;
                 case USE_SCHEMA:
+                  switch (securableObject.type()) {
+                    case METALAKE:
+                    case CATALOG:
+                    case SCHEMA:
+                      AuthorizationUtils.getMetadataObjectLocation(
+                              identifier, 
MetadataObjectUtil.toEntityType(securableObject))
+                          .stream()
+                          .forEach(
+                              locationPath -> {
+                                PathBasedMetadataObject pathBaseMetadataObject 
=
+                                    new PathBasedMetadataObject(
+                                        securableObject.parent(),
+                                        securableObject.name(),
+                                        locationPath,
+                                        PathBasedMetadataObject.Type.PATH);
+                                
pathBaseMetadataObject.validateAuthorizationMetadataObject();
+                                rangerSecurableObjects.add(
+                                    generateAuthorizationSecurableObject(
+                                        pathBaseMetadataObject.names(),
+                                        locationPath,
+                                        PathBasedMetadataObject.Type.PATH,
+                                        rangerPrivileges));
+                              });
+                      break;
+                    default:
+                      checkOmissionTranslate(

Review Comment:
   I removed it.



##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +128,319 @@ public List<String> policyResourceDefinesRule() {
     return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
   }
 
+  String getAuthorizationPath(PathBasedMetadataObject pathBasedMetadataObject) 
{
+    return HDFS_PATTERN.matcher(pathBasedMetadataObject.path()).replaceAll("");
+  }
+
+  /**
+   * Find the managed policy for the ranger securable object.
+   *
+   * @param authzMetadataObject The ranger securable object to find the 
managed policy.
+   * @return The managed policy for the metadata object.
+   */
+  public RangerPolicy findManagedPolicy(AuthorizationMetadataObject 
authzMetadataObject)
+      throws AuthorizationPluginException {
+    List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+    if (!policies.isEmpty()) {
+      /**
+       * Because Ranger doesn't support the precise search, Ranger will return 
the policy meets the
+       * wildcard(*,?) conditions, If you use `/a/b` condition to search 
policy, the Ranger will
+       * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely 
filter this research
+       * results.
+       */
+      List<String> nsMetadataObj = authzMetadataObject.names();
+      PathBasedMetadataObject pathAuthzMetadataObject =
+          (PathBasedMetadataObject) authzMetadataObject;
+      Map<String, String> preciseFilters = new HashMap<>();
+      for (int i = 0; i < nsMetadataObj.size() && i < 
policyResourceDefinesRule().size(); i++) {
+        preciseFilters.put(
+            policyResourceDefinesRule().get(i), 
getAuthorizationPath(pathAuthzMetadataObject));
+      }
+      policies =
+          policies.stream()
+              .filter(
+                  policy ->
+                      policy.getResources().entrySet().stream()
+                          .allMatch(
+                              entry ->
+                                  preciseFilters.containsKey(entry.getKey())
+                                      && entry.getValue().getValues().size() 
== 1

Review Comment:
   Gravitino only maintains the policies that it created.



##########
core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java:
##########
@@ -364,4 +376,127 @@ private static void checkCatalogType(
           catalogIdent, catalog.type(), privilege);
     }
   }
+
+  public static List<String> getMetadataObjectLocation(
+      NameIdentifier ident, Entity.EntityType type) {
+    List<String> locations = new ArrayList<>();
+    MetadataObject metadataObject;
+    try {
+      metadataObject = NameIdentifierUtil.toMetadataObject(ident, type);
+    } catch (IllegalArgumentException e) {
+      LOG.warn("Illegal argument exception for metadata object %s type %s", 
ident, type, e);
+      return locations;
+    }
+
+    String metalake =
+        (type == Entity.EntityType.METALAKE ? ident.name() : 
ident.namespace().level(0));
+    try {
+      switch (metadataObject.type()) {
+        case METALAKE:
+          {
+            NameIdentifier[] identifiers =
+                
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+            Arrays.stream(identifiers)
+                .collect(Collectors.toList())
+                .forEach(
+                    identifier -> {
+                      Catalog catalogObj =
+                          
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(identifier);
+                      if (catalogObj.provider().equals("hive")) {
+                        Schema schema =
+                            GravitinoEnv.getInstance()
+                                .schemaDispatcher()
+                                .loadSchema(
+                                    NameIdentifier.of(
+                                        metalake,
+                                        catalogObj.name(),
+                                        "default" /*Hive default schema*/));
+                        if 
(schema.properties().containsKey(HiveConstants.LOCATION)) {
+                          String defaultSchemaLocation =
+                              schema.properties().get(HiveConstants.LOCATION);
+                          if (defaultSchemaLocation != null && 
!defaultSchemaLocation.isEmpty()) {
+                            locations.add(defaultSchemaLocation);
+                          } else {
+                            LOG.warn("Catalog %s location is not found", 
ident);
+                          }
+                        }
+                      }
+                    });
+          }
+          break;
+        case CATALOG:
+          {
+            Catalog catalogObj = 
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(ident);
+            if (catalogObj.provider().equals("hive")) {
+              Schema schema =
+                  GravitinoEnv.getInstance()
+                      .schemaDispatcher()
+                      .loadSchema(
+                          NameIdentifier.of(
+                              metalake, catalogObj.name(), "default" /*Hive 
default schema*/));
+              if (schema.properties().containsKey(HiveConstants.LOCATION)) {
+                String defaultSchemaLocation = 
schema.properties().get(HiveConstants.LOCATION);
+                if (defaultSchemaLocation != null && 
!defaultSchemaLocation.isEmpty()) {
+                  locations.add(defaultSchemaLocation);
+                } else {
+                  LOG.warn("Catalog %s location is not found", ident);
+                }
+              }
+            }
+          }
+          break;
+        case SCHEMA:
+          {
+            Schema schema = 
GravitinoEnv.getInstance().schemaDispatcher().loadSchema(ident);
+            if (schema.properties().containsKey(HiveConstants.LOCATION)) {
+              String schemaLocation = 
schema.properties().get(HiveConstants.LOCATION);
+              if (schemaLocation != null && schemaLocation.isEmpty()) {
+                locations.add(schemaLocation);
+              } else {
+                LOG.warn("Schema %s location is not found", ident);
+              }
+            }
+          }
+          break;
+        case TABLE:
+          {
+            Table table = 
GravitinoEnv.getInstance().tableDispatcher().loadTable(ident);
+            if (table.properties().containsKey(HiveConstants.LOCATION)) {
+              String tableLocation = 
table.properties().get(HiveConstants.LOCATION);
+              if (tableLocation != null && tableLocation.isEmpty()) {
+                locations.add(tableLocation);
+              } else {
+                LOG.warn("Table %s location is not found", ident);

Review Comment:
   The Hive table's location will be returned.



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: [email protected]

For queries about this service, please contact Infrastructure at:
[email protected]


Reply via email to