yuqi1129 commented on code in PR #6100:
URL: https://github.com/apache/gravitino/pull/6100#discussion_r1903754504
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,372 @@ public List<String> policyResourceDefinesRule() {
return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
}
+ /**
+ * Finds the Gravitino-managed Ranger policy for the given path-based
+ * securable object, or returns {@code null} when no matching policy exists.
+ *
+ * @param authzMetadataObject The Ranger securable object to find the
+ * managed policy for; expected to be a PathBasedMetadataObject.
+ * @return The managed policy for the metadata object, or {@code null}.
+ * @throws AuthorizationPluginException if more than one policy matches, or
+ * a matched policy item contains duplicate privileges.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Ranger does not support precise search: its wildcard(*,?) matching
+ * means a query for `/a/b` also returns `/a/b1`, `/a/b2`, `/a/b*`.
+ * So the search results must be filtered by exact path here.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ PathBasedMetadataObject pathAuthzMetadataObject =
+ (PathBasedMetadataObject) authzMetadataObject;
+ Map<String, String> preciseFilters = new HashMap<>();
+ // Every resource-define key maps to the same path value; for the HDFS
+ // plugin policyResourceDefinesRule() exposes only the "path" resource.
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
pathAuthzMetadataObject.path());
+ }
+ // Keep only policies whose every resource is a single exact-path value.
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // A Gravitino-managed metadata object maps to at most one Ranger policy.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Policies delegated to Gravitino management must not carry duplicate
+ // privileges in any policy item; checkPolicyItemAccess enforces this.
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ /**
+ * Wildcard-searches this plugin's Ranger service for policies matching the
+ * object's path (one filter per resource define).
+ *
+ * NOTE(review): javadoc must precede the {@code @Override} annotation for
+ * the javadoc tool to attach it; also "Polies" in the method name looks
+ * like a typo for "Policies" (inherited signature — confirm upstream).
+ */
+ @Override
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ Preconditions.checkArgument(authzMetadataObject instanceof
PathBasedMetadataObject);
+ PathBasedMetadataObject pathBasedMetadataObject =
(PathBasedMetadataObject) authzMetadataObject;
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ // Every resource define is filtered by the same path value.
+ resourceDefines.stream()
+ .forEach(
+ resourceDefine -> {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefine,
pathBasedMetadataObject.path());
+ });
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
+ } catch (RangerServiceException e) {
+ throw new AuthorizationPluginException(e, "Failed to find the policies
in the Ranger");
+ }
+ }
+
+ /**
+ * If a SCHEMA is renamed, the relevant policies `{schema}`, `{schema}.*`
+ * and `{schema}.*.*` must be renamed as well. <br>
+ * If a TABLE is renamed, the relevant policies `{schema}.*` and
+ * `{schema}.*.*` must be renamed. <br>
+ * A PATH (fileset) rename requires no policy change here.
+ */
+ @Override
+ protected void doRenameMetadataObject(
+ AuthorizationMetadataObject authzMetadataObject,
+ AuthorizationMetadataObject newAuthzMetadataObject) {
+ // Each entry maps one old name segment to its new name segment; segments
+ // that must not change are represented by RESOURCE_ALL placeholders.
+ List<Map<String, String>> loop;
+ if (newAuthzMetadataObject.type().equals(SCHEMA)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(TABLE)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(COLUMN)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(2),
newAuthzMetadataObject.names().get(2)));
+ } else if (newAuthzMetadataObject.type().equals(PATH)) {
+ // Renaming a fileset does not require any policy update.
+ return;
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported metadata object type: " + authzMetadataObject.type());
+ }
+
+ List<String> oldMetadataNames = new ArrayList<>();
+ List<String> newMetadataNames = new ArrayList<>();
+ for (int index = 0; index < loop.size(); index++) {
+
oldMetadataNames.add(loop.get(index).keySet().stream().findFirst().get());
+
newMetadataNames.add(loop.get(index).values().stream().findFirst().get());
+
+ // index 0 -> SCHEMA, 1 -> TABLE, 2 -> COLUMN.
+ // NOTE(review): this HDFS plugin builds RangerHadoopSQLMetadataObject
+ // instances — confirm that is intended rather than a path-based type.
+ AuthorizationMetadataObject.Type type =
+ (index == 0
+ ? RangerHadoopSQLMetadataObject.Type.SCHEMA
+ : (index == 1
+ ? RangerHadoopSQLMetadataObject.Type.TABLE
+ : RangerHadoopSQLMetadataObject.Type.COLUMN));
+ AuthorizationMetadataObject oldHadoopSQLMetadataObject =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(oldMetadataNames),
+ AuthorizationMetadataObject.getLastName(oldMetadataNames),
+ type);
+ AuthorizationMetadataObject newHadoopSQLMetadataObject =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(newMetadataNames),
+ AuthorizationMetadataObject.getLastName(newMetadataNames),
+ type);
+ // NOTE(review): the operation type is hard-coded to SCHEMA even on the
+ // TABLE/COLUMN iterations, so updatePolicyByMetadataObject always
+ // resolves index 0 — looks wrong; confirm it should follow `index`.
+ updatePolicyByMetadataObject(
+ MetadataObject.Type.SCHEMA, oldHadoopSQLMetadataObject,
newHadoopSQLMetadataObject);
+ }
+ }
+
+ /**
+ * Renames the Ranger policies found for {@code oldAuthzMetaobject} so that
+ * the name segment and resource selected by {@code operationType} point to
+ * {@code newAuthzMetaobject}. Policies whose segment is the `*` wildcard,
+ * or whose renamed form already exists, are skipped with a warning.
+ *
+ * NOTE(review): operationTypeIndex has no FILESET/PATH entry, so an
+ * unmapped operationType would NPE on the unboxing at `int index = ...`
+ * — confirm callers only pass SCHEMA/TABLE/COLUMN.
+ */
+ @Override
+ protected void updatePolicyByMetadataObject(
+ MetadataObject.Type operationType,
+ AuthorizationMetadataObject oldAuthzMetaobject,
+ AuthorizationMetadataObject newAuthzMetaobject) {
+ List<RangerPolicy> oldPolicies = wildcardSearchPolies(oldAuthzMetaobject);
+ List<RangerPolicy> existNewPolicies =
wildcardSearchPolies(newAuthzMetaobject);
+ // Both conditions below are warnings only; processing continues.
+ if (oldPolicies.isEmpty()) {
+ LOG.warn("Cannot find the Ranger policy for the metadata object({})!",
oldAuthzMetaobject);
+ }
+ if (!existNewPolicies.isEmpty()) {
+ LOG.warn("The Ranger policy for the metadata object({}) already
exists!", newAuthzMetaobject);
+ }
+ // Maps the operation type to the dot-separated name segment to rewrite.
+ Map<MetadataObject.Type, Integer> operationTypeIndex =
+ ImmutableMap.of(
+ MetadataObject.Type.SCHEMA, 0,
+ MetadataObject.Type.TABLE, 1,
+ MetadataObject.Type.COLUMN, 2);
+ oldPolicies.stream()
+ .forEach(
+ policy -> {
+ try {
+ String policyName = policy.getName();
+ int index = operationTypeIndex.get(operationType);
+
+ // Rename the policy only when its name follows Gravitino's
+ // spec (dot-joined metadata object names).
+ if (policy
+ .getName()
+ .equals(
+
AuthorizationSecurableObject.DOT_JOINER.join(oldAuthzMetaobject.names()))) {
+ List<String> policyNames =
+ Lists.newArrayList(
+
AuthorizationSecurableObject.DOT_SPLITTER.splitToList(policyName));
+ Preconditions.checkArgument(
+ policyNames.size() >= oldAuthzMetaobject.names().size(),
+ String.format("The policy name(%s) is invalid!",
policyName));
+ if
(policyNames.get(index).equals(RangerHelper.RESOURCE_ALL)) {
+ // The wildcard policy `*` never needs renaming.
+ return;
+ }
+ policyNames.set(index,
newAuthzMetaobject.names().get(index));
+
policy.setName(AuthorizationSecurableObject.DOT_JOINER.join(policyNames));
+ }
+ // Point the policy resource at the new name as well.
+ policy
+ .getResources()
+ .put(
+ rangerHelper.policyResourceDefines.get(index),
+ new RangerPolicy.RangerPolicyResource(
+ newAuthzMetaobject.names().get(index)));
+
+ // Skip the update if an equivalent policy already exists
+ // under the new name or with identical resources.
+ boolean alreadyExist =
+ existNewPolicies.stream()
+ .anyMatch(
+ existNewPolicy ->
+
existNewPolicy.getName().equals(policy.getName())
+ ||
existNewPolicy.getResources().equals(policy.getResources()));
+ if (alreadyExist) {
+ LOG.warn(
+ "The Ranger policy for the metadata object({}) already
exists!",
+ newAuthzMetaobject);
+ return;
+ }
+
+ // Persist the renamed policy back to Ranger.
+ rangerClient.updatePolicy(policy.getId(), policy);
+ } catch (RangerServiceException e) {
+ // NOTE(review): sibling methods throw
+ // AuthorizationPluginException here — confirm whether a raw
+ // RuntimeException wrapper is intended.
+ LOG.error("Failed to rename the policy {}!", policy);
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ /**
+ * If a SCHEMA is removed, the relevant policies `{schema}`, `{schema}.*`
+ * and `{schema}.*.*` must be removed. <br>
+ * If a TABLE is removed, the relevant policies `{schema}.*` and
+ * `{schema}.*.*` must be removed. <br>
+ * NOTE(review): the original javadoc also mentions COLUMN removal
+ * (`{schema}.*.*`), but COLUMN falls into the else branch below and
+ * throws — confirm whether COLUMN should be supported here.
+ */
+ @Override
+ protected void doRemoveMetadataObject(AuthorizationMetadataObject
authzMetadataObject) {
+ if (authzMetadataObject.type().equals(SCHEMA)) {
+ doRemoveSchemaMetadataObject(authzMetadataObject);
+ } else if (authzMetadataObject.type().equals(TABLE)) {
+ doRemoveTableMetadataObject(authzMetadataObject);
+ } else if (authzMetadataObject.type().equals(PATH)) {
+ // A fileset path maps to a single policy; remove it directly.
+ removePolicyByMetadataObject(authzMetadataObject);
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported authorization metadata object type: " +
authzMetadataObject.type());
+ }
+ }
+
+ /**
+ * Remove the SCHEMA, Need to remove these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` permissions.
+ */
+ private void doRemoveSchemaMetadataObject(AuthorizationMetadataObject
authzMetadataObject) {
+ Preconditions.checkArgument(
+ authzMetadataObject instanceof PathBasedMetadataObject,
+ "The metadata object must be a PathBasedMetadataObject");
+ Preconditions.checkArgument(
+ authzMetadataObject.type() == SCHEMA, "The metadata object type must
be SCHEMA");
+ Preconditions.checkArgument(
+ authzMetadataObject.names().size() == 1, "The metadata object names
must be 1");
+ if (RangerHelper.RESOURCE_ALL.equals(authzMetadataObject.name())) {
+ // Remove all schema in this catalog
+ NameIdentifier[] catalogs =
+
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+ Arrays.asList(catalogs).stream()
+ .forEach(
+ catalog -> {
+ List<String> catalogLocations =
+ AuthorizationUtils.getMetadataObjectLocation(
+ NameIdentifier.of(catalog.name()),
Entity.EntityType.CATALOG);
+ catalogLocations.stream()
+ .forEach(
+ locationPath -> {
+ AuthorizationMetadataObject catalogMetadataObject =
+ new PathBasedMetadataObject(
+ metalake, catalog.name(), locationPath,
PATH);
+ doRemoveSchemaMetadataObject(catalogMetadataObject);
+ });
+ });
+ } else {
+ // Remove all table in this schema
+ NameIdentifier[] tables =
+ GravitinoEnv.getInstance()
+ .tableDispatcher()
+ .listTables(Namespace.of(authzMetadataObject.name()));
+ Arrays.asList(tables).stream()
+ .forEach(
+ table -> {
+ NameIdentifier identifier =
+ NameIdentifier.of(authzMetadataObject.name(),
table.name());
+ List<String> tabLocations =
+ AuthorizationUtils.getMetadataObjectLocation(
+ identifier, Entity.EntityType.TABLE);
+ tabLocations.stream()
+ .forEach(
+ locationPath -> {
+ AuthorizationMetadataObject tableMetadataObject =
+ new PathBasedMetadataObject(
+ authzMetadataObject.name(), table.name(),
locationPath, PATH);
+ doRemoveTableMetadataObject(tableMetadataObject);
+ });
+ // Remove schema
+ Schema schema =
+ GravitinoEnv.getInstance()
+ .schemaDispatcher()
+
.loadSchema(NameIdentifier.of(authzMetadataObject.name()));
+ List<String> schemaLocations =
+ AuthorizationUtils.getMetadataObjectLocation(
+ identifier, Entity.EntityType.SCHEMA);
+ schemaLocations.stream()
+ .forEach(
+ locationPath -> {
+ AuthorizationMetadataObject schemaMetadataObject =
+ new PathBasedMetadataObject(
+ authzMetadataObject.name(), schema.name(),
locationPath, PATH);
+ removePolicyByMetadataObject(schemaMetadataObject);
+ });
+ });
Review Comment:
I mean we need to remove all the tables in a schema first; only after all the
tables have been removed can we start to remove the schema. I noticed that the
code will remove the schema right after removing just one of the tables in it.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]