xunliu commented on code in PR #6100:
URL: https://github.com/apache/gravitino/pull/6100#discussion_r1903689269
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,372 @@ public List<String> policyResourceDefinesRule() {
return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
}
+ /**
+ * Find the managed policy for the ranger securable object.
+ *
+ * @param authzMetadataObject The ranger securable object to find the
managed policy.
+ * @return The managed policy for the metadata object.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Because Ranger doesn't support the precise search, Ranger will return
the policy meets the
+ * wildcard(*,?) conditions, If you use `/a/b` condition to search
policy, the Ranger will
+ * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely
filter this research
+ * results.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ PathBasedMetadataObject pathAuthzMetadataObject =
+ (PathBasedMetadataObject) authzMetadataObject;
+ Map<String, String> preciseFilters = new HashMap<>();
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
pathAuthzMetadataObject.path());
+ }
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // Only return the policies that are managed by Gravitino.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Delegating Gravitino management policies cannot contain duplicate
privilege
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ @Override
+ /** Wildcard search the Ranger policies in the different Ranger service. */
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ Preconditions.checkArgument(authzMetadataObject instanceof
PathBasedMetadataObject);
+ PathBasedMetadataObject pathBasedMetadataObject =
(PathBasedMetadataObject) authzMetadataObject;
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ resourceDefines.stream()
+ .forEach(
+ resourceDefine -> {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefine,
pathBasedMetadataObject.path());
+ });
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
+ } catch (RangerServiceException e) {
+ throw new AuthorizationPluginException(e, "Failed to find the policies
in the Ranger");
+ }
+ }
+
+ /**
+ * IF rename the SCHEMA, Need to rename these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF rename the TABLE, Need to rename these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ */
+ @Override
+ protected void doRenameMetadataObject(
+ AuthorizationMetadataObject authzMetadataObject,
+ AuthorizationMetadataObject newAuthzMetadataObject) {
+ List<Map<String, String>> loop;
+ if (newAuthzMetadataObject.type().equals(SCHEMA)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(TABLE)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(COLUMN)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(2),
newAuthzMetadataObject.names().get(2)));
+ } else if (newAuthzMetadataObject.type().equals(PATH)) {
+ // do nothing when fileset is renamed
Review Comment:
Yes, I will address this part in a follow-up PR.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,372 @@ public List<String> policyResourceDefinesRule() {
return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
}
+ /**
+ * Find the managed policy for the ranger securable object.
+ *
+ * @param authzMetadataObject The ranger securable object to find the
managed policy.
+ * @return The managed policy for the metadata object.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Because Ranger doesn't support the precise search, Ranger will return
the policy meets the
+ * wildcard(*,?) conditions, If you use `/a/b` condition to search
policy, the Ranger will
+ * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely
filter this research
+ * results.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ PathBasedMetadataObject pathAuthzMetadataObject =
+ (PathBasedMetadataObject) authzMetadataObject;
+ Map<String, String> preciseFilters = new HashMap<>();
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
pathAuthzMetadataObject.path());
+ }
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // Only return the policies that are managed by Gravitino.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Delegating Gravitino management policies cannot contain duplicate
privilege
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ @Override
+ /** Wildcard search the Ranger policies in the different Ranger service. */
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ Preconditions.checkArgument(authzMetadataObject instanceof
PathBasedMetadataObject);
+ PathBasedMetadataObject pathBasedMetadataObject =
(PathBasedMetadataObject) authzMetadataObject;
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ resourceDefines.stream()
+ .forEach(
+ resourceDefine -> {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefine,
pathBasedMetadataObject.path());
+ });
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
+ } catch (RangerServiceException e) {
+ throw new AuthorizationPluginException(e, "Failed to find the policies
in the Ranger");
+ }
+ }
+
+ /**
+ * IF rename the SCHEMA, Need to rename these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF rename the TABLE, Need to rename these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ */
+ @Override
+ protected void doRenameMetadataObject(
+ AuthorizationMetadataObject authzMetadataObject,
+ AuthorizationMetadataObject newAuthzMetadataObject) {
+ List<Map<String, String>> loop;
+ if (newAuthzMetadataObject.type().equals(SCHEMA)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(TABLE)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(COLUMN)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(2),
newAuthzMetadataObject.names().get(2)));
+ } else if (newAuthzMetadataObject.type().equals(PATH)) {
+ // do nothing when fileset is renamed
+ return;
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported metadata object type: " + authzMetadataObject.type());
+ }
+
+ List<String> oldMetadataNames = new ArrayList<>();
+ List<String> newMetadataNames = new ArrayList<>();
+ for (int index = 0; index < loop.size(); index++) {
+
oldMetadataNames.add(loop.get(index).keySet().stream().findFirst().get());
+
newMetadataNames.add(loop.get(index).values().stream().findFirst().get());
+
+ AuthorizationMetadataObject.Type type =
+ (index == 0
+ ? RangerHadoopSQLMetadataObject.Type.SCHEMA
+ : (index == 1
+ ? RangerHadoopSQLMetadataObject.Type.TABLE
+ : RangerHadoopSQLMetadataObject.Type.COLUMN));
+ AuthorizationMetadataObject oldHadoopSQLMetadataObject =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(oldMetadataNames),
+ AuthorizationMetadataObject.getLastName(oldMetadataNames),
+ type);
+ AuthorizationMetadataObject newHadoopSQLMetadataObject =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(newMetadataNames),
+ AuthorizationMetadataObject.getLastName(newMetadataNames),
+ type);
+ updatePolicyByMetadataObject(
+ MetadataObject.Type.SCHEMA, oldHadoopSQLMetadataObject,
newHadoopSQLMetadataObject);
+ }
+ }
+
Review Comment:
Fixed it.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -278,88 +696,91 @@ public List<AuthorizationSecurableObject>
translateOwner(MetadataObject gravitin
}
@Override
- public AuthorizationMetadataObject translateMetadataObject(MetadataObject
metadataObject) {
- Preconditions.checkArgument(
- allowMetadataObjectTypesRule().contains(metadataObject.type()),
- String.format(
- "The metadata object type %s is not supported in the
RangerAuthorizationHDFSPlugin",
- metadataObject.type()));
- List<String> nsMetadataObject =
-
Lists.newArrayList(SecurableObjects.DOT_SPLITTER.splitToList(metadataObject.fullName()));
- Preconditions.checkArgument(
- nsMetadataObject.size() > 0, "The metadata object must have at least
one name.");
-
- PathBasedMetadataObject rangerPathBaseMetadataObject;
- switch (metadataObject.type()) {
- case METALAKE:
- case CATALOG:
- rangerPathBaseMetadataObject =
- new PathBasedMetadataObject("", PathBasedMetadataObject.Type.PATH);
- break;
- case SCHEMA:
- rangerPathBaseMetadataObject =
- new PathBasedMetadataObject(
- metadataObject.fullName(), PathBasedMetadataObject.Type.PATH);
- break;
- case FILESET:
- rangerPathBaseMetadataObject =
- new PathBasedMetadataObject(
- getLocationPath(metadataObject),
PathBasedMetadataObject.Type.PATH);
- break;
- default:
- throw new AuthorizationPluginException(
- "The metadata object type %s is not supported in the
RangerAuthorizationHDFSPlugin",
- metadataObject.type());
- }
- rangerPathBaseMetadataObject.validateAuthorizationMetadataObject();
- return rangerPathBaseMetadataObject;
- }
-
- private NameIdentifier getObjectNameIdentifier(MetadataObject
metadataObject) {
- return NameIdentifier.parse(String.format("%s.%s", metalake,
metadataObject.fullName()));
+ public List<AuthorizationMetadataObject>
translateMetadataObject(MetadataObject metadataObject) {
+ List<AuthorizationMetadataObject> authzMetadataObjects = new ArrayList<>();
+ Entity.EntityType entityType =
MetadataObjectUtil.toEntityType(metadataObject);
+ NameIdentifier identifier =
+ metadataObject.type().equals(MetadataObject.Type.METALAKE)
+ ? NameIdentifier.of(metadataObject.fullName())
+ : NameIdentifier.parse(String.join(".", metalake,
metadataObject.fullName()));
+ List<String> locations =
AuthorizationUtils.getMetadataObjectLocation(identifier, entityType);
+ locations.stream()
+ .forEach(
+ locationPath -> {
+ PathBasedMetadataObject pathBaseMetadataObject =
+ new PathBasedMetadataObject(
+ metadataObject.parent(),
+ metadataObject.name(),
+ locationPath,
+ PathBasedMetadataObject.Type.PATH);
+ pathBaseMetadataObject.validateAuthorizationMetadataObject();
+ authzMetadataObjects.add(pathBaseMetadataObject);
+ });
+ return authzMetadataObjects;
}
- @VisibleForTesting
- public String getLocationPath(MetadataObject metadataObject) throws
NoSuchEntityException {
- String locationPath = null;
- switch (metadataObject.type()) {
- case METALAKE:
- case SCHEMA:
- case TABLE:
- break;
- case CATALOG:
- {
- Namespace nsMetadataObj =
Namespace.fromString(metadataObject.fullName());
- NameIdentifier ident = NameIdentifier.of(metalake,
nsMetadataObj.level(0));
- Catalog catalog =
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(ident);
- if (catalog.provider().equals("hive")) {
- Schema schema =
- GravitinoEnv.getInstance()
- .schemaDispatcher()
- .loadSchema(
- NameIdentifier.of(
- metalake, nsMetadataObj.level(0), "default" /*Hive
default schema*/));
- String defaultSchemaLocation =
schema.properties().get(HiveConstants.LOCATION);
- locationPath =
pattern.matcher(defaultSchemaLocation).replaceAll("");
- }
- }
- break;
- case FILESET:
- FilesetDispatcher filesetDispatcher =
GravitinoEnv.getInstance().filesetDispatcher();
- NameIdentifier identifier = getObjectNameIdentifier(metadataObject);
- Fileset fileset = filesetDispatcher.loadFileset(identifier);
+ @Override
+ public Boolean onMetadataUpdated(MetadataObjectChange... changes) throws
RuntimeException {
+ for (MetadataObjectChange change : changes) {
+ if (change instanceof MetadataObjectChange.RenameMetadataObject) {
+ MetadataObject metadataObject =
+ ((MetadataObjectChange.RenameMetadataObject)
change).metadataObject();
+ MetadataObject newMetadataObject =
+ ((MetadataObjectChange.RenameMetadataObject)
change).newMetadataObject();
Preconditions.checkArgument(
- fileset != null, String.format("Fileset %s is not found",
identifier));
- String filesetLocation = fileset.storageLocation();
+ metadataObject.type() == newMetadataObject.type(),
+ "The old and new metadata object type must be equal!");
+ if (metadataObject.type() == MetadataObject.Type.METALAKE) {
+ // Rename the metalake name
+ this.metalake = newMetadataObject.name();
+ // Did not need to update the Ranger policy
+ continue;
+ } else if (metadataObject.type() == MetadataObject.Type.CATALOG) {
+ // Did not need to update the Ranger policy
+ continue;
+ }
+ List<AuthorizationMetadataObject> oldAuthzMetadataObjects =
+ translateMetadataObject(metadataObject);
+ List<AuthorizationMetadataObject> newAuthzMetadataObjects =
+ translateMetadataObject(newMetadataObject);
Preconditions.checkArgument(
- filesetLocation != null, String.format("Fileset %s location is not
found", identifier));
- locationPath = pattern.matcher(filesetLocation).replaceAll("");
- break;
- default:
- throw new AuthorizationPluginException(
- "The metadata object type %s is not supported in the
RangerAuthorizationHDFSPlugin",
- metadataObject.type());
+ oldAuthzMetadataObjects.size() == newAuthzMetadataObjects.size(),
+ "The old and new metadata objects size must be equal!");
+ for (int i = 0; i < oldAuthzMetadataObjects.size(); i++) {
+ AuthorizationMetadataObject oldAuthMetadataObject =
oldAuthzMetadataObjects.get(i);
+ AuthorizationMetadataObject newAuthzMetadataObject =
newAuthzMetadataObjects.get(i);
+ if (oldAuthMetadataObject.equals(newAuthzMetadataObject)) {
Review Comment:
Fixed it, and added unit tests.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,372 @@ public List<String> policyResourceDefinesRule() {
return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
}
+ /**
+ * Find the managed policy for the ranger securable object.
+ *
+ * @param authzMetadataObject The ranger securable object to find the
managed policy.
+ * @return The managed policy for the metadata object.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Because Ranger doesn't support the precise search, Ranger will return
the policy meets the
+ * wildcard(*,?) conditions, If you use `/a/b` condition to search
policy, the Ranger will
+ * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely
filter this research
+ * results.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ PathBasedMetadataObject pathAuthzMetadataObject =
+ (PathBasedMetadataObject) authzMetadataObject;
+ Map<String, String> preciseFilters = new HashMap<>();
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
pathAuthzMetadataObject.path());
+ }
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // Only return the policies that are managed by Gravitino.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Delegating Gravitino management policies cannot contain duplicate
privilege
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ @Override
+ /** Wildcard search the Ranger policies in the different Ranger service. */
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ Preconditions.checkArgument(authzMetadataObject instanceof
PathBasedMetadataObject);
+ PathBasedMetadataObject pathBasedMetadataObject =
(PathBasedMetadataObject) authzMetadataObject;
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ resourceDefines.stream()
+ .forEach(
+ resourceDefine -> {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefine,
pathBasedMetadataObject.path());
+ });
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
Review Comment:
DONE.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,372 @@ public List<String> policyResourceDefinesRule() {
return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
}
+ /**
+ * Find the managed policy for the ranger securable object.
+ *
+ * @param authzMetadataObject The ranger securable object to find the
managed policy.
+ * @return The managed policy for the metadata object.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Because Ranger doesn't support the precise search, Ranger will return
the policy meets the
+ * wildcard(*,?) conditions, If you use `/a/b` condition to search
policy, the Ranger will
+ * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely
filter this research
+ * results.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ PathBasedMetadataObject pathAuthzMetadataObject =
+ (PathBasedMetadataObject) authzMetadataObject;
+ Map<String, String> preciseFilters = new HashMap<>();
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
pathAuthzMetadataObject.path());
+ }
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // Only return the policies that are managed by Gravitino.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Delegating Gravitino management policies cannot contain duplicate
privilege
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ @Override
+ /** Wildcard search the Ranger policies in the different Ranger service. */
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ Preconditions.checkArgument(authzMetadataObject instanceof
PathBasedMetadataObject);
+ PathBasedMetadataObject pathBasedMetadataObject =
(PathBasedMetadataObject) authzMetadataObject;
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ resourceDefines.stream()
+ .forEach(
+ resourceDefine -> {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefine,
pathBasedMetadataObject.path());
+ });
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
+ } catch (RangerServiceException e) {
+ throw new AuthorizationPluginException(e, "Failed to find the policies
in the Ranger");
+ }
+ }
+
+ /**
+ * IF rename the SCHEMA, Need to rename these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF rename the TABLE, Need to rename these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ */
+ @Override
+ protected void doRenameMetadataObject(
+ AuthorizationMetadataObject authzMetadataObject,
+ AuthorizationMetadataObject newAuthzMetadataObject) {
+ List<Map<String, String>> loop;
+ if (newAuthzMetadataObject.type().equals(SCHEMA)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(TABLE)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(COLUMN)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(2),
newAuthzMetadataObject.names().get(2)));
+ } else if (newAuthzMetadataObject.type().equals(PATH)) {
+ // do nothing when fileset is renamed
+ return;
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported metadata object type: " + authzMetadataObject.type());
+ }
+
+ List<String> oldMetadataNames = new ArrayList<>();
+ List<String> newMetadataNames = new ArrayList<>();
+ for (int index = 0; index < loop.size(); index++) {
+
oldMetadataNames.add(loop.get(index).keySet().stream().findFirst().get());
+
newMetadataNames.add(loop.get(index).values().stream().findFirst().get());
+
+ AuthorizationMetadataObject.Type type =
+ (index == 0
+ ? RangerHadoopSQLMetadataObject.Type.SCHEMA
+ : (index == 1
+ ? RangerHadoopSQLMetadataObject.Type.TABLE
+ : RangerHadoopSQLMetadataObject.Type.COLUMN));
Review Comment:
DONE.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -278,88 +696,91 @@ public List<AuthorizationSecurableObject>
translateOwner(MetadataObject gravitin
}
@Override
- public AuthorizationMetadataObject translateMetadataObject(MetadataObject
metadataObject) {
- Preconditions.checkArgument(
- allowMetadataObjectTypesRule().contains(metadataObject.type()),
- String.format(
- "The metadata object type %s is not supported in the
RangerAuthorizationHDFSPlugin",
- metadataObject.type()));
- List<String> nsMetadataObject =
-
Lists.newArrayList(SecurableObjects.DOT_SPLITTER.splitToList(metadataObject.fullName()));
- Preconditions.checkArgument(
- nsMetadataObject.size() > 0, "The metadata object must have at least
one name.");
-
- PathBasedMetadataObject rangerPathBaseMetadataObject;
- switch (metadataObject.type()) {
- case METALAKE:
- case CATALOG:
- rangerPathBaseMetadataObject =
- new PathBasedMetadataObject("", PathBasedMetadataObject.Type.PATH);
- break;
- case SCHEMA:
- rangerPathBaseMetadataObject =
- new PathBasedMetadataObject(
- metadataObject.fullName(), PathBasedMetadataObject.Type.PATH);
- break;
- case FILESET:
- rangerPathBaseMetadataObject =
- new PathBasedMetadataObject(
- getLocationPath(metadataObject),
PathBasedMetadataObject.Type.PATH);
- break;
- default:
- throw new AuthorizationPluginException(
- "The metadata object type %s is not supported in the
RangerAuthorizationHDFSPlugin",
- metadataObject.type());
- }
- rangerPathBaseMetadataObject.validateAuthorizationMetadataObject();
- return rangerPathBaseMetadataObject;
- }
-
- private NameIdentifier getObjectNameIdentifier(MetadataObject
metadataObject) {
- return NameIdentifier.parse(String.format("%s.%s", metalake,
metadataObject.fullName()));
+ public List<AuthorizationMetadataObject>
translateMetadataObject(MetadataObject metadataObject) {
+ List<AuthorizationMetadataObject> authzMetadataObjects = new ArrayList<>();
+ Entity.EntityType entityType =
MetadataObjectUtil.toEntityType(metadataObject);
+ NameIdentifier identifier =
+ metadataObject.type().equals(MetadataObject.Type.METALAKE)
+ ? NameIdentifier.of(metadataObject.fullName())
+ : NameIdentifier.parse(String.join(".", metalake,
metadataObject.fullName()));
Review Comment:
The `metalake` and `catalog` do not have a location in the HiveCatalog.
##########
core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java:
##########
@@ -364,4 +378,129 @@ private static void checkCatalogType(
catalogIdent, catalog.type(), privilege);
}
}
+
+ public static List<String> getMetadataObjectLocation(
+ NameIdentifier ident, Entity.EntityType type) {
+ List<String> locations = new ArrayList<>();
+ MetadataObject metadataObject;
+ try {
+ metadataObject = NameIdentifierUtil.toMetadataObject(ident, type);
+ } catch (IllegalArgumentException e) {
+ LOG.warn("Illegal argument exception for metadata object %s type %s",
ident, type, e);
+ return locations;
+ }
+
+ String metalake =
+ (type == Entity.EntityType.METALAKE ? ident.name() :
ident.namespace().level(0));
+ try {
+ switch (metadataObject.type()) {
+ case METALAKE:
+ {
+ NameIdentifier[] identifiers =
+
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+ List<String> finalLocationPath = locations;
Review Comment:
OK, I removed it.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java:
##########
@@ -76,6 +85,328 @@ public Map<Privilege.Name, Set<AuthorizationPrivilege>>
privilegesMappingRule()
ImmutableSet.of(RangerHadoopSQLPrivilege.READ,
RangerHadoopSQLPrivilege.SELECT));
}
+ /**
+ * Find the managed policy for the ranger securable object.
+ *
+ * @param authzMetadataObject The ranger securable object to find the
managed policy.
+ * @return The managed policy for the metadata object.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Because Ranger doesn't support the precise search, Ranger will return
the policy meets the
+ * wildcard(*,?) conditions, If you use `db.table` condition to search
policy, the Ranger will
+ * match `db1.table1`, `db1.table2`, `db*.table*`, So we need to
manually precisely filter
+ * this research results.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ Map<String, String> preciseFilters = new HashMap<>();
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
nsMetadataObj.get(i));
+ }
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // Only return the policies that are managed by Gravitino.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Delegating Gravitino management policies cannot contain duplicate
privilege
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ /** Wildcard search the Ranger policies in the different Ranger service. */
+ @Override
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ for (int i = 0; i < authzMetadataObject.names().size() && i <
resourceDefines.size(); i++) {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefines.get(i),
+ authzMetadataObject.names().get(i));
+ }
+
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
+ } catch (RangerServiceException e) {
+ throw new AuthorizationPluginException(e, "Failed to find the policies
in the Ranger");
+ }
+ }
+
+ /**
+ * IF rename the SCHEMA, Need to rename these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF rename the TABLE, Need to rename these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ * IF rename the COLUMN, Only need to rename `{schema}.*.*` <br>
+ */
+ @Override
+ protected void doRenameMetadataObject(
+ AuthorizationMetadataObject authzMetadataObject,
+ AuthorizationMetadataObject newAuthzMetadataObject) {
+ List<Map<String, String>> loop = new ArrayList<>();
Review Comment:
OK, I reorganized it.
##########
catalogs/catalog-hive/src/test/java/org/apache/gravitino/catalog/hive/integration/test/CatalogHiveIT.java:
##########
@@ -229,7 +229,14 @@ public void stop() throws IOException {
catalog.asSchemas().dropSchema(schema, true);
}));
Arrays.stream(metalake.listCatalogs())
- .forEach((catalogName -> metalake.dropCatalog(catalogName, true)));
+ .forEach(
+ catalogName -> {
+ try {
+ metalake.dropCatalog(catalogName, true);
+ } catch (Exception e) {
+ // Ignore exception
Review Comment:
OK, I rolled back this change.
##########
core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java:
##########
@@ -364,4 +378,129 @@ private static void checkCatalogType(
catalogIdent, catalog.type(), privilege);
}
}
+
+ public static List<String> getMetadataObjectLocation(
+ NameIdentifier ident, Entity.EntityType type) {
+ List<String> locations = new ArrayList<>();
+ MetadataObject metadataObject;
+ try {
+ metadataObject = NameIdentifierUtil.toMetadataObject(ident, type);
+ } catch (IllegalArgumentException e) {
+ LOG.warn("Illegal argument exception for metadata object %s type %s",
ident, type, e);
+ return locations;
+ }
+
+ String metalake =
+ (type == Entity.EntityType.METALAKE ? ident.name() :
ident.namespace().level(0));
+ try {
+ switch (metadataObject.type()) {
+ case METALAKE:
+ {
+ NameIdentifier[] identifiers =
+
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+ List<String> finalLocationPath = locations;
+ Arrays.stream(identifiers)
+ .collect(Collectors.toList())
+ .forEach(
+ identifier -> {
+ Catalog catalogObj =
+
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(identifier);
+ if (catalogObj.provider().equals("hive")) {
+ Schema schema =
+ GravitinoEnv.getInstance()
+ .schemaDispatcher()
+ .loadSchema(
+ NameIdentifier.of(
+ metalake,
+ catalogObj.name(),
+ "default" /*Hive default schema*/));
+ if
(schema.properties().containsKey(HiveConstants.LOCATION)) {
+ String defaultSchemaLocation =
+ schema.properties().get(HiveConstants.LOCATION);
+ Preconditions.checkArgument(
+ defaultSchemaLocation != null,
+ String.format("Catalog %s location is not
found", ident));
+ String location =
+
HDFS_PATTERN.matcher(defaultSchemaLocation).replaceAll("");
Review Comment:
OK, I will return the original location; we can process it ourselves.
##########
core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java:
##########
@@ -364,4 +378,129 @@ private static void checkCatalogType(
catalogIdent, catalog.type(), privilege);
}
}
+
+ public static List<String> getMetadataObjectLocation(
+ NameIdentifier ident, Entity.EntityType type) {
+ List<String> locations = new ArrayList<>();
+ MetadataObject metadataObject;
+ try {
+ metadataObject = NameIdentifierUtil.toMetadataObject(ident, type);
+ } catch (IllegalArgumentException e) {
+ LOG.warn("Illegal argument exception for metadata object %s type %s",
ident, type, e);
+ return locations;
+ }
+
+ String metalake =
+ (type == Entity.EntityType.METALAKE ? ident.name() :
ident.namespace().level(0));
+ try {
+ switch (metadataObject.type()) {
+ case METALAKE:
+ {
+ NameIdentifier[] identifiers =
+
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+ List<String> finalLocationPath = locations;
+ Arrays.stream(identifiers)
+ .collect(Collectors.toList())
+ .forEach(
+ identifier -> {
+ Catalog catalogObj =
+
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(identifier);
+ if (catalogObj.provider().equals("hive")) {
+ Schema schema =
+ GravitinoEnv.getInstance()
+ .schemaDispatcher()
+ .loadSchema(
+ NameIdentifier.of(
+ metalake,
+ catalogObj.name(),
+ "default" /*Hive default schema*/));
+ if
(schema.properties().containsKey(HiveConstants.LOCATION)) {
Review Comment:
This code will return the Hive warehouse root path.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java:
##########
@@ -76,6 +85,328 @@ public Map<Privilege.Name, Set<AuthorizationPrivilege>>
privilegesMappingRule()
ImmutableSet.of(RangerHadoopSQLPrivilege.READ,
RangerHadoopSQLPrivilege.SELECT));
}
+ /**
+ * Find the managed policy for the ranger securable object.
+ *
+ * @param authzMetadataObject The ranger securable object to find the
managed policy.
+ * @return The managed policy for the metadata object.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Because Ranger doesn't support the precise search, Ranger will return
the policy meets the
+ * wildcard(*,?) conditions, If you use `db.table` condition to search
policy, the Ranger will
+ * match `db1.table1`, `db1.table2`, `db*.table*`, So we need to
manually precisely filter
+ * this research results.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ Map<String, String> preciseFilters = new HashMap<>();
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
nsMetadataObj.get(i));
+ }
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // Only return the policies that are managed by Gravitino.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Delegating Gravitino management policies cannot contain duplicate
privilege
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ /** Wildcard search the Ranger policies in the different Ranger service. */
+ @Override
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ for (int i = 0; i < authzMetadataObject.names().size() && i <
resourceDefines.size(); i++) {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefines.get(i),
+ authzMetadataObject.names().get(i));
+ }
+
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
+ } catch (RangerServiceException e) {
+ throw new AuthorizationPluginException(e, "Failed to find the policies
in the Ranger");
+ }
+ }
+
+ /**
+ * IF rename the SCHEMA, Need to rename these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF rename the TABLE, Need to rename these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ * IF rename the COLUMN, Only need to rename `{schema}.*.*` <br>
+ */
+ @Override
+ protected void doRenameMetadataObject(
+ AuthorizationMetadataObject authzMetadataObject,
+ AuthorizationMetadataObject newAuthzMetadataObject) {
+ List<Map<String, String>> loop = new ArrayList<>();
+ if (newAuthzMetadataObject.type().equals(SCHEMA)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(TABLE)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(COLUMN)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(2),
newAuthzMetadataObject.names().get(2)));
+ } else if (newAuthzMetadataObject.type().equals(PATH)) { // do nothing
when fileset is renamed
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported metadata object type: " + authzMetadataObject.type());
+ }
+
+ List<String> oldMetadataNames = new ArrayList<>();
+ List<String> newMetadataNames = new ArrayList<>();
+ for (int index = 0; index < loop.size(); index++) {
+
oldMetadataNames.add(loop.get(index).keySet().stream().findFirst().get());
+
newMetadataNames.add(loop.get(index).values().stream().findFirst().get());
+
+ AuthorizationMetadataObject.Type type =
+ (index == 0
+ ? RangerHadoopSQLMetadataObject.Type.SCHEMA
+ : (index == 1
+ ? RangerHadoopSQLMetadataObject.Type.TABLE
+ : RangerHadoopSQLMetadataObject.Type.COLUMN));
+ AuthorizationMetadataObject authzMetadataObject1 =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(oldMetadataNames),
+ AuthorizationMetadataObject.getLastName(oldMetadataNames),
+ type);
+ AuthorizationMetadataObject newAuthzMetadataObject1 =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(newMetadataNames),
+ AuthorizationMetadataObject.getLastName(newMetadataNames),
+ type);
+ updatePolicyByMetadataObject(
+ type.metadataObjectType(), authzMetadataObject1,
newAuthzMetadataObject1);
+ }
+ }
+
+ @Override
+ protected void updatePolicyByMetadataObject(
+ MetadataObject.Type operationType,
+ AuthorizationMetadataObject oldAuthzMetaobject,
+ AuthorizationMetadataObject newAuthzMetaobject) {
+ List<RangerPolicy> oldPolicies = wildcardSearchPolies(oldAuthzMetaobject);
+ List<RangerPolicy> existNewPolicies =
wildcardSearchPolies(newAuthzMetaobject);
+ if (oldPolicies.isEmpty()) {
+ LOG.warn("Cannot find the Ranger policy for the metadata object({})!",
oldAuthzMetaobject);
+ }
+ if (!existNewPolicies.isEmpty()) {
+ LOG.warn("The Ranger policy for the metadata object({}) already
exists!", newAuthzMetaobject);
+ }
+ Map<MetadataObject.Type, Integer> operationTypeIndex =
+ ImmutableMap.of(
+ MetadataObject.Type.SCHEMA, 0,
+ MetadataObject.Type.TABLE, 1,
+ MetadataObject.Type.COLUMN, 2);
+ oldPolicies.stream()
+ .forEach(
+ policy -> {
+ try {
+ String policyName = policy.getName();
+ int index = operationTypeIndex.get(operationType);
+
+ // Update the policy name is following Gravitino's spec
+ if (policy
+ .getName()
+ .equals(
+
AuthorizationSecurableObject.DOT_JOINER.join(oldAuthzMetaobject.names()))) {
+ List<String> policyNames =
+ Lists.newArrayList(
+
AuthorizationSecurableObject.DOT_SPLITTER.splitToList(policyName));
+ Preconditions.checkArgument(
+ policyNames.size() >= oldAuthzMetaobject.names().size(),
+ String.format("The policy name(%s) is invalid!",
policyName));
+ if
(policyNames.get(index).equals(RangerHelper.RESOURCE_ALL)) {
+ // Doesn't need to rename the policy `*`
+ return;
+ }
+ policyNames.set(index,
newAuthzMetaobject.names().get(index));
+
policy.setName(AuthorizationSecurableObject.DOT_JOINER.join(policyNames));
+ }
+ // Update the policy resource name to new name
+ policy
+ .getResources()
+ .put(
+ policyResourceDefinesRule().get(index),
+ new RangerPolicy.RangerPolicyResource(
+ newAuthzMetaobject.names().get(index)));
+
+ boolean alreadyExist =
+ existNewPolicies.stream()
+ .anyMatch(
+ existNewPolicy ->
+
existNewPolicy.getName().equals(policy.getName())
+ ||
existNewPolicy.getResources().equals(policy.getResources()));
+ if (alreadyExist) {
+ LOG.warn(
+ "The Ranger policy for the metadata object({}) already
exists!",
+ newAuthzMetaobject);
+ return;
+ }
+
+ // Update the policy
+ rangerClient.updatePolicy(policy.getId(), policy);
+ } catch (RangerServiceException e) {
+ LOG.error("Failed to rename the policy {}!", policy);
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ /**
+ * IF remove the SCHEMA, need to remove these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF remove the TABLE, need to remove these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ * IF remove the COLUMN, Only need to remove `{schema}.*.*` <br>
+ */
+ @Override
+ protected void doRemoveMetadataObject(AuthorizationMetadataObject
authzMetadataObject) {
+ AuthorizationMetadataObject.Type type = authzMetadataObject.type();
+ if (type.equals(SCHEMA)) {
+ doRemoveSchemaMetadataObject(authzMetadataObject);
+ } else if (type.equals(TABLE)) {
+ doRemoveTableMetadataObject(authzMetadataObject);
Review Comment:
1. These three methods are different.
2. Removed the `PATH` case.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,372 @@ public List<String> policyResourceDefinesRule() {
return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
}
+ /**
+ * Find the managed policy for the ranger securable object.
+ *
+ * @param authzMetadataObject The ranger securable object to find the
managed policy.
+ * @return The managed policy for the metadata object.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Because Ranger doesn't support the precise search, Ranger will return
the policy meets the
+ * wildcard(*,?) conditions, If you use `/a/b` condition to search
policy, the Ranger will
+ * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely
filter this research
+ * results.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ PathBasedMetadataObject pathAuthzMetadataObject =
+ (PathBasedMetadataObject) authzMetadataObject;
+ Map<String, String> preciseFilters = new HashMap<>();
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
pathAuthzMetadataObject.path());
+ }
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // Only return the policies that are managed by Gravitino.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Delegating Gravitino management policies cannot contain duplicate
privilege
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ @Override
+ /** Wildcard search the Ranger policies in the different Ranger service. */
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ Preconditions.checkArgument(authzMetadataObject instanceof
PathBasedMetadataObject);
+ PathBasedMetadataObject pathBasedMetadataObject =
(PathBasedMetadataObject) authzMetadataObject;
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ resourceDefines.stream()
+ .forEach(
+ resourceDefine -> {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefine,
pathBasedMetadataObject.path());
+ });
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
+ } catch (RangerServiceException e) {
+ throw new AuthorizationPluginException(e, "Failed to find the policies
in the Ranger");
+ }
+ }
+
+ /**
+ * IF rename the SCHEMA, Need to rename these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF rename the TABLE, Need to rename these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ */
+ @Override
+ protected void doRenameMetadataObject(
+ AuthorizationMetadataObject authzMetadataObject,
+ AuthorizationMetadataObject newAuthzMetadataObject) {
+ List<Map<String, String>> loop;
Review Comment:
OK, I renamed this variable.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,372 @@ public List<String> policyResourceDefinesRule() {
return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
}
+ /**
+ * Find the managed policy for the ranger securable object.
+ *
+ * @param authzMetadataObject The ranger securable object to find the
managed policy.
+ * @return The managed policy for the metadata object.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Because Ranger doesn't support the precise search, Ranger will return
the policy meets the
+ * wildcard(*,?) conditions, If you use `/a/b` condition to search
policy, the Ranger will
+ * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely
filter this research
+ * results.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ PathBasedMetadataObject pathAuthzMetadataObject =
+ (PathBasedMetadataObject) authzMetadataObject;
+ Map<String, String> preciseFilters = new HashMap<>();
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
pathAuthzMetadataObject.path());
+ }
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // Only return the policies that are managed by Gravitino.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Delegating Gravitino management policies cannot contain duplicate
privilege
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ @Override
+ /** Wildcard search the Ranger policies in the different Ranger service. */
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ Preconditions.checkArgument(authzMetadataObject instanceof
PathBasedMetadataObject);
+ PathBasedMetadataObject pathBasedMetadataObject =
(PathBasedMetadataObject) authzMetadataObject;
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ resourceDefines.stream()
+ .forEach(
+ resourceDefine -> {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefine,
pathBasedMetadataObject.path());
+ });
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
+ } catch (RangerServiceException e) {
+ throw new AuthorizationPluginException(e, "Failed to find the policies
in the Ranger");
+ }
+ }
+
+ /**
+ * IF rename the SCHEMA, Need to rename these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF rename the TABLE, Need to rename these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ */
+ @Override
+ protected void doRenameMetadataObject(
+ AuthorizationMetadataObject authzMetadataObject,
+ AuthorizationMetadataObject newAuthzMetadataObject) {
+ List<Map<String, String>> loop;
+ if (newAuthzMetadataObject.type().equals(SCHEMA)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(TABLE)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(COLUMN)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(2),
newAuthzMetadataObject.names().get(2)));
+ } else if (newAuthzMetadataObject.type().equals(PATH)) {
+ // do nothing when fileset is renamed
+ return;
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported metadata object type: " + authzMetadataObject.type());
+ }
+
+ List<String> oldMetadataNames = new ArrayList<>();
+ List<String> newMetadataNames = new ArrayList<>();
+ for (int index = 0; index < loop.size(); index++) {
+
oldMetadataNames.add(loop.get(index).keySet().stream().findFirst().get());
+
newMetadataNames.add(loop.get(index).values().stream().findFirst().get());
+
+ AuthorizationMetadataObject.Type type =
+ (index == 0
+ ? RangerHadoopSQLMetadataObject.Type.SCHEMA
+ : (index == 1
+ ? RangerHadoopSQLMetadataObject.Type.TABLE
+ : RangerHadoopSQLMetadataObject.Type.COLUMN));
+ AuthorizationMetadataObject oldHadoopSQLMetadataObject =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(oldMetadataNames),
+ AuthorizationMetadataObject.getLastName(oldMetadataNames),
+ type);
+ AuthorizationMetadataObject newHadoopSQLMetadataObject =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(newMetadataNames),
+ AuthorizationMetadataObject.getLastName(newMetadataNames),
+ type);
+ updatePolicyByMetadataObject(
+ MetadataObject.Type.SCHEMA, oldHadoopSQLMetadataObject,
newHadoopSQLMetadataObject);
+ }
+ }
+
+ @Override
+ protected void updatePolicyByMetadataObject(
+ MetadataObject.Type operationType,
+ AuthorizationMetadataObject oldAuthzMetaobject,
+ AuthorizationMetadataObject newAuthzMetaobject) {
+ List<RangerPolicy> oldPolicies = wildcardSearchPolies(oldAuthzMetaobject);
+ List<RangerPolicy> existNewPolicies =
wildcardSearchPolies(newAuthzMetaobject);
+ if (oldPolicies.isEmpty()) {
+ LOG.warn("Cannot find the Ranger policy for the metadata object({})!",
oldAuthzMetaobject);
Review Comment:
Fixed it.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,372 @@ public List<String> policyResourceDefinesRule() {
return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
}
+ /**
+ * Find the managed policy for the ranger securable object.
+ *
+ * @param authzMetadataObject The ranger securable object to find the
managed policy.
+ * @return The managed policy for the metadata object.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Because Ranger doesn't support the precise search, Ranger will return
the policy meets the
+ * wildcard(*,?) conditions, If you use `/a/b` condition to search
policy, the Ranger will
+ * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely
filter this research
+ * results.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ PathBasedMetadataObject pathAuthzMetadataObject =
+ (PathBasedMetadataObject) authzMetadataObject;
+ Map<String, String> preciseFilters = new HashMap<>();
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
pathAuthzMetadataObject.path());
+ }
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // Only return the policies that are managed by Gravitino.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Delegating Gravitino management policies cannot contain duplicate
privilege
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ @Override
+ /** Wildcard search the Ranger policies in the different Ranger service. */
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ Preconditions.checkArgument(authzMetadataObject instanceof
PathBasedMetadataObject);
+ PathBasedMetadataObject pathBasedMetadataObject =
(PathBasedMetadataObject) authzMetadataObject;
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ resourceDefines.stream()
+ .forEach(
+ resourceDefine -> {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefine,
pathBasedMetadataObject.path());
+ });
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
+ } catch (RangerServiceException e) {
+ throw new AuthorizationPluginException(e, "Failed to find the policies
in the Ranger");
+ }
+ }
+
+ /**
+ * IF rename the SCHEMA, Need to rename these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF rename the TABLE, Need to rename these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ */
+ @Override
+ protected void doRenameMetadataObject(
+ AuthorizationMetadataObject authzMetadataObject,
+ AuthorizationMetadataObject newAuthzMetadataObject) {
+ List<Map<String, String>> loop;
+ if (newAuthzMetadataObject.type().equals(SCHEMA)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(TABLE)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(COLUMN)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(2),
newAuthzMetadataObject.names().get(2)));
+ } else if (newAuthzMetadataObject.type().equals(PATH)) {
+ // do nothing when fileset is renamed
+ return;
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported metadata object type: " + authzMetadataObject.type());
+ }
+
+ List<String> oldMetadataNames = new ArrayList<>();
+ List<String> newMetadataNames = new ArrayList<>();
+ for (int index = 0; index < loop.size(); index++) {
+
oldMetadataNames.add(loop.get(index).keySet().stream().findFirst().get());
+
newMetadataNames.add(loop.get(index).values().stream().findFirst().get());
+
+ AuthorizationMetadataObject.Type type =
+ (index == 0
+ ? RangerHadoopSQLMetadataObject.Type.SCHEMA
+ : (index == 1
+ ? RangerHadoopSQLMetadataObject.Type.TABLE
+ : RangerHadoopSQLMetadataObject.Type.COLUMN));
+ AuthorizationMetadataObject oldHadoopSQLMetadataObject =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(oldMetadataNames),
+ AuthorizationMetadataObject.getLastName(oldMetadataNames),
+ type);
+ AuthorizationMetadataObject newHadoopSQLMetadataObject =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(newMetadataNames),
+ AuthorizationMetadataObject.getLastName(newMetadataNames),
+ type);
+ updatePolicyByMetadataObject(
+ MetadataObject.Type.SCHEMA, oldHadoopSQLMetadataObject,
newHadoopSQLMetadataObject);
+ }
+ }
+
+ @Override
+ protected void updatePolicyByMetadataObject(
+ MetadataObject.Type operationType,
+ AuthorizationMetadataObject oldAuthzMetaobject,
+ AuthorizationMetadataObject newAuthzMetaobject) {
+ List<RangerPolicy> oldPolicies = wildcardSearchPolies(oldAuthzMetaobject);
+ List<RangerPolicy> existNewPolicies =
wildcardSearchPolies(newAuthzMetaobject);
+ if (oldPolicies.isEmpty()) {
+ LOG.warn("Cannot find the Ranger policy for the metadata object({})!",
oldAuthzMetaobject);
+ }
+ if (!existNewPolicies.isEmpty()) {
+ LOG.warn("The Ranger policy for the metadata object({}) already
exists!", newAuthzMetaobject);
+ }
+ Map<MetadataObject.Type, Integer> operationTypeIndex =
+ ImmutableMap.of(
+ MetadataObject.Type.SCHEMA, 0,
+ MetadataObject.Type.TABLE, 1,
+ MetadataObject.Type.COLUMN, 2);
+ oldPolicies.stream()
+ .forEach(
+ policy -> {
+ try {
+ String policyName = policy.getName();
+ int index = operationTypeIndex.get(operationType);
+
+ // Update the policy name is following Gravitino's spec
+ if (policy
+ .getName()
+ .equals(
+
AuthorizationSecurableObject.DOT_JOINER.join(oldAuthzMetaobject.names()))) {
+ List<String> policyNames =
+ Lists.newArrayList(
+
AuthorizationSecurableObject.DOT_SPLITTER.splitToList(policyName));
+ Preconditions.checkArgument(
+ policyNames.size() >= oldAuthzMetaobject.names().size(),
+ String.format("The policy name(%s) is invalid!",
policyName));
+ if
(policyNames.get(index).equals(RangerHelper.RESOURCE_ALL)) {
+ // Doesn't need to rename the policy `*`
+ return;
+ }
+ policyNames.set(index,
newAuthzMetaobject.names().get(index));
+
policy.setName(AuthorizationSecurableObject.DOT_JOINER.join(policyNames));
+ }
+ // Update the policy resource name to new name
+ policy
+ .getResources()
+ .put(
+ rangerHelper.policyResourceDefines.get(index),
+ new RangerPolicy.RangerPolicyResource(
+ newAuthzMetaobject.names().get(index)));
+
+ boolean alreadyExist =
+ existNewPolicies.stream()
+ .anyMatch(
+ existNewPolicy ->
+
existNewPolicy.getName().equals(policy.getName())
+ ||
existNewPolicy.getResources().equals(policy.getResources()));
+ if (alreadyExist) {
+ LOG.warn(
+ "The Ranger policy for the metadata object({}) already
exists!",
+ newAuthzMetaobject);
+ return;
+ }
+
+ // Update the policy
+ rangerClient.updatePolicy(policy.getId(), policy);
+ } catch (RangerServiceException e) {
+ LOG.error("Failed to rename the policy {}!", policy);
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ /**
+ * IF remove the SCHEMA, need to remove these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF remove the TABLE, need to remove these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ * IF remove the COLUMN, Only need to remove `{schema}.*.*` <br>
+ */
+ @Override
+ protected void doRemoveMetadataObject(AuthorizationMetadataObject
authzMetadataObject) {
+ if (authzMetadataObject.type().equals(SCHEMA)) {
+ doRemoveSchemaMetadataObject(authzMetadataObject);
+ } else if (authzMetadataObject.type().equals(TABLE)) {
+ doRemoveTableMetadataObject(authzMetadataObject);
+ } else if (authzMetadataObject.type().equals(PATH)) {
+ removePolicyByMetadataObject(authzMetadataObject);
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported authorization metadata object type: " +
authzMetadataObject.type());
+ }
+ }
+
+ /**
+ * Remove the SCHEMA, Need to remove these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` permissions.
+ */
+ private void doRemoveSchemaMetadataObject(AuthorizationMetadataObject
authzMetadataObject) {
+ Preconditions.checkArgument(
+ authzMetadataObject instanceof PathBasedMetadataObject,
+ "The metadata object must be a PathBasedMetadataObject");
+ Preconditions.checkArgument(
+ authzMetadataObject.type() == SCHEMA, "The metadata object type must
be SCHEMA");
+ Preconditions.checkArgument(
+ authzMetadataObject.names().size() == 1, "The metadata object names
must be 1");
+ if (RangerHelper.RESOURCE_ALL.equals(authzMetadataObject.name())) {
+ // Remove all schema in this catalog
+ NameIdentifier[] catalogs =
+
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+ Arrays.asList(catalogs).stream()
+ .forEach(
+ catalog -> {
+ List<String> catalogLocations =
+ AuthorizationUtils.getMetadataObjectLocation(
+ NameIdentifier.of(catalog.name()),
Entity.EntityType.CATALOG);
+ catalogLocations.stream()
+ .forEach(
+ locationPath -> {
+ AuthorizationMetadataObject catalogMetadataObject =
+ new PathBasedMetadataObject(
+ metalake, catalog.name(), locationPath,
PATH);
+ doRemoveSchemaMetadataObject(catalogMetadataObject);
+ });
+ });
Review Comment:
The logic is:
1. If the metadata object type is `SCHEMA`, and
2. the metadata object's `name()` is `*`,
then we need to remove all schemas in this catalog.
##########
.github/workflows/backend-integration-test-action.yml:
##########
@@ -60,7 +60,8 @@ jobs:
-x :web:web:test -x :web:integration-test:test -x
:clients:client-python:test -x :flink-connector:flink:test -x
:spark-connector:spark-common:test
-x :spark-connector:spark-3.3:test -x
:spark-connector:spark-3.4:test -x :spark-connector:spark-3.5:test
-x :spark-connector:spark-runtime-3.3:test -x
:spark-connector:spark-runtime-3.4:test -x
:spark-connector:spark-runtime-3.5:test
- -x :authorizations:authorization-ranger:test -x
:trino-connector:integration-test:test -x :trino-connector:trino-connector:test
+ -x :trino-connector:integration-test:test -x
:trino-connector:trino-connector:test
Review Comment:
Yes. Because we have a separate access-control pipeline, we need to
exclude the `authorization` tests here.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -118,27 +127,372 @@ public List<String> policyResourceDefinesRule() {
return ImmutableList.of(RangerDefines.PolicyResource.PATH.getName());
}
+ /**
+ * Find the managed policy for the ranger securable object.
+ *
+ * @param authzMetadataObject The ranger securable object to find the
managed policy.
+ * @return The managed policy for the metadata object.
+ */
+ public RangerPolicy findManagedPolicy(AuthorizationMetadataObject
authzMetadataObject)
+ throws AuthorizationPluginException {
+ List<RangerPolicy> policies = wildcardSearchPolies(authzMetadataObject);
+ if (!policies.isEmpty()) {
+ /**
+ * Because Ranger doesn't support the precise search, Ranger will return
the policy meets the
+ * wildcard(*,?) conditions, If you use `/a/b` condition to search
policy, the Ranger will
+ * match `/a/b1`, `/a/b2`, `/a/b*`, So we need to manually precisely
filter this research
+ * results.
+ */
+ List<String> nsMetadataObj = authzMetadataObject.names();
+ PathBasedMetadataObject pathAuthzMetadataObject =
+ (PathBasedMetadataObject) authzMetadataObject;
+ Map<String, String> preciseFilters = new HashMap<>();
+ for (int i = 0; i < nsMetadataObj.size() && i <
policyResourceDefinesRule().size(); i++) {
+ preciseFilters.put(policyResourceDefinesRule().get(i),
pathAuthzMetadataObject.path());
+ }
+ policies =
+ policies.stream()
+ .filter(
+ policy ->
+ policy.getResources().entrySet().stream()
+ .allMatch(
+ entry ->
+ preciseFilters.containsKey(entry.getKey())
+ && entry.getValue().getValues().size()
== 1
+ && entry
+ .getValue()
+ .getValues()
+
.contains(preciseFilters.get(entry.getKey()))))
+ .collect(Collectors.toList());
+ }
+ // Only return the policies that are managed by Gravitino.
+ if (policies.size() > 1) {
+ throw new AuthorizationPluginException("Each metadata object can have at
most one policy.");
+ }
+
+ if (policies.isEmpty()) {
+ return null;
+ }
+
+ RangerPolicy policy = policies.get(0);
+ // Delegating Gravitino management policies cannot contain duplicate
privilege
+ policy.getPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+ policy.getDenyPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getRowFilterPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
policy.getDataMaskPolicyItems().forEach(RangerHelper::checkPolicyItemAccess);
+
+ return policy;
+ }
+
+ @Override
+ /** Wildcard search the Ranger policies in the different Ranger service. */
+ protected List<RangerPolicy> wildcardSearchPolies(
+ AuthorizationMetadataObject authzMetadataObject) {
+ Preconditions.checkArgument(authzMetadataObject instanceof
PathBasedMetadataObject);
+ PathBasedMetadataObject pathBasedMetadataObject =
(PathBasedMetadataObject) authzMetadataObject;
+ List<String> resourceDefines = policyResourceDefinesRule();
+ Map<String, String> searchFilters = new HashMap<>();
+ searchFilters.put(SearchFilter.SERVICE_NAME, rangerServiceName);
+ resourceDefines.stream()
+ .forEach(
+ resourceDefine -> {
+ searchFilters.put(
+ SearchFilter.RESOURCE_PREFIX + resourceDefine,
pathBasedMetadataObject.path());
+ });
+ try {
+ List<RangerPolicy> policies = rangerClient.findPolicies(searchFilters);
+ return policies;
+ } catch (RangerServiceException e) {
+ throw new AuthorizationPluginException(e, "Failed to find the policies
in the Ranger");
+ }
+ }
+
+ /**
+ * IF rename the SCHEMA, Need to rename these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF rename the TABLE, Need to rename these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ */
+ @Override
+ protected void doRenameMetadataObject(
+ AuthorizationMetadataObject authzMetadataObject,
+ AuthorizationMetadataObject newAuthzMetadataObject) {
+ List<Map<String, String>> loop;
+ if (newAuthzMetadataObject.type().equals(SCHEMA)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(TABLE)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(RangerHelper.RESOURCE_ALL,
RangerHelper.RESOURCE_ALL));
+ } else if (newAuthzMetadataObject.type().equals(COLUMN)) {
+ loop =
+ ImmutableList.of(
+ ImmutableMap.of(
+ authzMetadataObject.names().get(0),
newAuthzMetadataObject.names().get(0)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(1),
newAuthzMetadataObject.names().get(1)),
+ ImmutableMap.of(
+ authzMetadataObject.names().get(2),
newAuthzMetadataObject.names().get(2)));
+ } else if (newAuthzMetadataObject.type().equals(PATH)) {
+ // do nothing when fileset is renamed
+ return;
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported metadata object type: " + authzMetadataObject.type());
+ }
+
+ List<String> oldMetadataNames = new ArrayList<>();
+ List<String> newMetadataNames = new ArrayList<>();
+ for (int index = 0; index < loop.size(); index++) {
+
oldMetadataNames.add(loop.get(index).keySet().stream().findFirst().get());
+
newMetadataNames.add(loop.get(index).values().stream().findFirst().get());
+
+ AuthorizationMetadataObject.Type type =
+ (index == 0
+ ? RangerHadoopSQLMetadataObject.Type.SCHEMA
+ : (index == 1
+ ? RangerHadoopSQLMetadataObject.Type.TABLE
+ : RangerHadoopSQLMetadataObject.Type.COLUMN));
+ AuthorizationMetadataObject oldHadoopSQLMetadataObject =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(oldMetadataNames),
+ AuthorizationMetadataObject.getLastName(oldMetadataNames),
+ type);
+ AuthorizationMetadataObject newHadoopSQLMetadataObject =
+ new RangerHadoopSQLMetadataObject(
+ AuthorizationMetadataObject.getParentFullName(newMetadataNames),
+ AuthorizationMetadataObject.getLastName(newMetadataNames),
+ type);
+ updatePolicyByMetadataObject(
+ MetadataObject.Type.SCHEMA, oldHadoopSQLMetadataObject,
newHadoopSQLMetadataObject);
+ }
+ }
+
+ @Override
+ protected void updatePolicyByMetadataObject(
+ MetadataObject.Type operationType,
+ AuthorizationMetadataObject oldAuthzMetaobject,
+ AuthorizationMetadataObject newAuthzMetaobject) {
+ List<RangerPolicy> oldPolicies = wildcardSearchPolies(oldAuthzMetaobject);
+ List<RangerPolicy> existNewPolicies =
wildcardSearchPolies(newAuthzMetaobject);
+ if (oldPolicies.isEmpty()) {
+ LOG.warn("Cannot find the Ranger policy for the metadata object({})!",
oldAuthzMetaobject);
+ }
+ if (!existNewPolicies.isEmpty()) {
+ LOG.warn("The Ranger policy for the metadata object({}) already
exists!", newAuthzMetaobject);
+ }
+ Map<MetadataObject.Type, Integer> operationTypeIndex =
+ ImmutableMap.of(
+ MetadataObject.Type.SCHEMA, 0,
+ MetadataObject.Type.TABLE, 1,
+ MetadataObject.Type.COLUMN, 2);
+ oldPolicies.stream()
+ .forEach(
+ policy -> {
+ try {
+ String policyName = policy.getName();
+ int index = operationTypeIndex.get(operationType);
+
+ // Update the policy name is following Gravitino's spec
+ if (policy
+ .getName()
+ .equals(
+
AuthorizationSecurableObject.DOT_JOINER.join(oldAuthzMetaobject.names()))) {
+ List<String> policyNames =
+ Lists.newArrayList(
+
AuthorizationSecurableObject.DOT_SPLITTER.splitToList(policyName));
+ Preconditions.checkArgument(
+ policyNames.size() >= oldAuthzMetaobject.names().size(),
+ String.format("The policy name(%s) is invalid!",
policyName));
+ if
(policyNames.get(index).equals(RangerHelper.RESOURCE_ALL)) {
+ // Doesn't need to rename the policy `*`
+ return;
+ }
+ policyNames.set(index,
newAuthzMetaobject.names().get(index));
+
policy.setName(AuthorizationSecurableObject.DOT_JOINER.join(policyNames));
+ }
+ // Update the policy resource name to new name
+ policy
+ .getResources()
+ .put(
+ rangerHelper.policyResourceDefines.get(index),
+ new RangerPolicy.RangerPolicyResource(
+ newAuthzMetaobject.names().get(index)));
+
+ boolean alreadyExist =
+ existNewPolicies.stream()
+ .anyMatch(
+ existNewPolicy ->
+
existNewPolicy.getName().equals(policy.getName())
+ ||
existNewPolicy.getResources().equals(policy.getResources()));
+ if (alreadyExist) {
+ LOG.warn(
+ "The Ranger policy for the metadata object({}) already
exists!",
+ newAuthzMetaobject);
+ return;
+ }
+
+ // Update the policy
+ rangerClient.updatePolicy(policy.getId(), policy);
+ } catch (RangerServiceException e) {
+ LOG.error("Failed to rename the policy {}!", policy);
+ throw new RuntimeException(e);
+ }
+ });
+ }
+
+ /**
+ * IF remove the SCHEMA, need to remove these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` <br>
+ * IF remove the TABLE, need to remove these the relevant policies,
`{schema}.*`, `{schema}.*.*`
+ * <br>
+ * IF remove the COLUMN, Only need to remove `{schema}.*.*` <br>
+ */
+ @Override
+ protected void doRemoveMetadataObject(AuthorizationMetadataObject
authzMetadataObject) {
+ if (authzMetadataObject.type().equals(SCHEMA)) {
+ doRemoveSchemaMetadataObject(authzMetadataObject);
+ } else if (authzMetadataObject.type().equals(TABLE)) {
+ doRemoveTableMetadataObject(authzMetadataObject);
+ } else if (authzMetadataObject.type().equals(PATH)) {
+ removePolicyByMetadataObject(authzMetadataObject);
+ } else {
+ throw new IllegalArgumentException(
+ "Unsupported authorization metadata object type: " +
authzMetadataObject.type());
+ }
+ }
+
+ /**
+ * Remove the SCHEMA, Need to remove these the relevant policies,
`{schema}`, `{schema}.*`,
+ * `{schema}.*.*` permissions.
+ */
+ private void doRemoveSchemaMetadataObject(AuthorizationMetadataObject
authzMetadataObject) {
+ Preconditions.checkArgument(
+ authzMetadataObject instanceof PathBasedMetadataObject,
+ "The metadata object must be a PathBasedMetadataObject");
+ Preconditions.checkArgument(
+ authzMetadataObject.type() == SCHEMA, "The metadata object type must
be SCHEMA");
+ Preconditions.checkArgument(
+ authzMetadataObject.names().size() == 1, "The metadata object names
must be 1");
+ if (RangerHelper.RESOURCE_ALL.equals(authzMetadataObject.name())) {
+ // Remove all schema in this catalog
+ NameIdentifier[] catalogs =
+
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+ Arrays.asList(catalogs).stream()
+ .forEach(
+ catalog -> {
+ List<String> catalogLocations =
+ AuthorizationUtils.getMetadataObjectLocation(
+ NameIdentifier.of(catalog.name()),
Entity.EntityType.CATALOG);
+ catalogLocations.stream()
+ .forEach(
+ locationPath -> {
+ AuthorizationMetadataObject catalogMetadataObject =
+ new PathBasedMetadataObject(
+ metalake, catalog.name(), locationPath,
PATH);
+ doRemoveSchemaMetadataObject(catalogMetadataObject);
+ });
+ });
+ } else {
+ // Remove all table in this schema
+ NameIdentifier[] tables =
+ GravitinoEnv.getInstance()
+ .tableDispatcher()
+ .listTables(Namespace.of(authzMetadataObject.name()));
+ Arrays.asList(tables).stream()
+ .forEach(
+ table -> {
+ NameIdentifier identifier =
+ NameIdentifier.of(authzMetadataObject.name(),
table.name());
+ List<String> tabLocations =
+ AuthorizationUtils.getMetadataObjectLocation(
+ identifier, Entity.EntityType.TABLE);
+ tabLocations.stream()
+ .forEach(
+ locationPath -> {
+ AuthorizationMetadataObject tableMetadataObject =
+ new PathBasedMetadataObject(
+ authzMetadataObject.name(), table.name(),
locationPath, PATH);
+ doRemoveTableMetadataObject(tableMetadataObject);
+ });
+ // Remove schema
+ Schema schema =
+ GravitinoEnv.getInstance()
+ .schemaDispatcher()
+
.loadSchema(NameIdentifier.of(authzMetadataObject.name()));
+ List<String> schemaLocations =
+ AuthorizationUtils.getMetadataObjectLocation(
+ identifier, Entity.EntityType.SCHEMA);
+ schemaLocations.stream()
+ .forEach(
+ locationPath -> {
+ AuthorizationMetadataObject schemaMetadataObject =
+ new PathBasedMetadataObject(
+ authzMetadataObject.name(), schema.name(),
locationPath, PATH);
+ removePolicyByMetadataObject(schemaMetadataObject);
+ });
+ });
Review Comment:
Yes. If Gravitino removes a `Schema`, then we need to remove all table
location permissions in this `schema`.
##########
core/src/main/java/org/apache/gravitino/authorization/AuthorizationUtils.java:
##########
@@ -364,4 +378,129 @@ private static void checkCatalogType(
catalogIdent, catalog.type(), privilege);
}
}
+
+ public static List<String> getMetadataObjectLocation(
+ NameIdentifier ident, Entity.EntityType type) {
+ List<String> locations = new ArrayList<>();
+ MetadataObject metadataObject;
+ try {
+ metadataObject = NameIdentifierUtil.toMetadataObject(ident, type);
+ } catch (IllegalArgumentException e) {
+ LOG.warn("Illegal argument exception for metadata object %s type %s",
ident, type, e);
+ return locations;
+ }
+
+ String metalake =
+ (type == Entity.EntityType.METALAKE ? ident.name() :
ident.namespace().level(0));
+ try {
+ switch (metadataObject.type()) {
+ case METALAKE:
+ {
+ NameIdentifier[] identifiers =
+
GravitinoEnv.getInstance().catalogDispatcher().listCatalogs(Namespace.of(metalake));
+ List<String> finalLocationPath = locations;
+ Arrays.stream(identifiers)
+ .collect(Collectors.toList())
+ .forEach(
+ identifier -> {
+ Catalog catalogObj =
+
GravitinoEnv.getInstance().catalogDispatcher().loadCatalog(identifier);
+ if (catalogObj.provider().equals("hive")) {
+ Schema schema =
+ GravitinoEnv.getInstance()
+ .schemaDispatcher()
+ .loadSchema(
+ NameIdentifier.of(
+ metalake,
+ catalogObj.name(),
+ "default" /*Hive default schema*/));
Review Comment:
In fact, the Hive `default` schema's location is the Hive warehouse root
path, not a schema DB path.
##########
core/src/main/java/org/apache/gravitino/hook/FilesetHookDispatcher.java:
##########
@@ -103,8 +104,11 @@ public Fileset alterFileset(NameIdentifier ident,
FilesetChange... changes)
@Override
public boolean dropFileset(NameIdentifier ident) {
+ List<String> locations =
+ AuthorizationUtils.getMetadataObjectLocation(ident,
Entity.EntityType.FILESET);
boolean dropped = dispatcher.dropFileset(ident);
- AuthorizationUtils.authorizationPluginRemovePrivileges(ident,
Entity.EntityType.FILESET);
+ AuthorizationUtils.authorizationPluginRemovePrivileges(
+ ident, Entity.EntityType.FILESET, locations);
return dropped;
Review Comment:
We need to get the metadata object's location before dropping the fileset.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHDFSPlugin.java:
##########
@@ -183,36 +540,66 @@ public List<AuthorizationSecurableObject>
translatePrivilege(SecurableObject sec
// in the RangerAuthorizationHDFSPlugin.
break;
case USE_SCHEMA:
+ switch (securableObject.type()) {
+ case METALAKE:
+ case CATALOG:
+ case SCHEMA:
+ AuthorizationUtils.getMetadataObjectLocation(
+ identifier,
MetadataObjectUtil.toEntityType(securableObject))
+ .stream()
+ .forEach(
+ locationPath -> {
+ PathBasedMetadataObject pathBaseMetadataObject
=
+ new PathBasedMetadataObject(
+ securableObject.parent(),
+ securableObject.name(),
+ locationPath,
+ PathBasedMetadataObject.Type.PATH);
+
pathBaseMetadataObject.validateAuthorizationMetadataObject();
+ rangerSecurableObjects.add(
+ generateAuthorizationSecurableObject(
+ pathBaseMetadataObject.names(),
+ locationPath,
+ PathBasedMetadataObject.Type.PATH,
+ rangerPrivileges));
+ });
+ break;
+ default:
+ checkOmissionTranslate(
Review Comment:
OK, I changed it to `checkUnprocessedTranslate()`.
##########
authorizations/authorization-ranger/src/main/java/org/apache/gravitino/authorization/ranger/RangerAuthorizationHadoopSQLPlugin.java:
##########
@@ -435,6 +788,62 @@ public AuthorizationMetadataObject
translateMetadataObject(MetadataObject metada
AuthorizationMetadataObject.getLastName(nsMetadataObject),
type);
rangerHadoopSQLMetadataObject.validateAuthorizationMetadataObject();
- return rangerHadoopSQLMetadataObject;
+ return ImmutableList.of(rangerHadoopSQLMetadataObject);
+ }
+
+ @Override
+ public Boolean onMetadataUpdated(MetadataObjectChange... changes) throws
RuntimeException {
+ for (MetadataObjectChange change : changes) {
+ if (change instanceof MetadataObjectChange.RenameMetadataObject) {
+ MetadataObject metadataObject =
+ ((MetadataObjectChange.RenameMetadataObject)
change).metadataObject();
+ MetadataObject newMetadataObject =
+ ((MetadataObjectChange.RenameMetadataObject)
change).newMetadataObject();
+ Preconditions.checkArgument(
+ metadataObject.type() == newMetadataObject.type(),
+ "The old and new metadata object type must be equal!");
+ if (metadataObject.type() == MetadataObject.Type.METALAKE) {
+ // Rename the metalake name
+ this.metalake = newMetadataObject.name();
+ // Did not need to update the Ranger policy
+ continue;
+ } else if (metadataObject.type() == MetadataObject.Type.CATALOG) {
+ // Did not need to update the Ranger policy
+ continue;
+ }
+ List<AuthorizationMetadataObject> oldAuthzMetadataObjects =
+ translateMetadataObject(metadataObject);
+ List<AuthorizationMetadataObject> newAuthzMetadataObjects =
+ translateMetadataObject(newMetadataObject);
+ Preconditions.checkArgument(
+ oldAuthzMetadataObjects.size() == newAuthzMetadataObjects.size(),
+ "The old and new metadata objects size must be equal!");
+ for (int i = 0; i < oldAuthzMetadataObjects.size(); i++) {
+ AuthorizationMetadataObject oldAuthMetadataObject =
oldAuthzMetadataObjects.get(i);
+ AuthorizationMetadataObject newAuthzMetadataObject =
newAuthzMetadataObjects.get(i);
+ if (oldAuthMetadataObject.equals(newAuthzMetadataObject)) {
+ LOG.info(
+ "The metadata object({}) and new metadata object({}) are
equal, so ignore rename!",
+ oldAuthMetadataObject.fullName(),
+ newAuthzMetadataObject.fullName());
+ continue;
+ }
+ doRenameMetadataObject(oldAuthMetadataObject,
newAuthzMetadataObject);
+ }
+ } else if (change instanceof MetadataObjectChange.RemoveMetadataObject) {
+ MetadataObject metadataObject =
+ ((MetadataObjectChange.RemoveMetadataObject)
change).metadataObject();
+ // if (metadataObject.type() != MetadataObject.Type.FILESET) {
Review Comment:
DONE.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]