http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/ebe83454/hbase-agent/src/main/java/org/apache/ranger/authorization/hbase/RangerAuthorizationCoprocessorImpl.java ---------------------------------------------------------------------- diff --git a/hbase-agent/src/main/java/org/apache/ranger/authorization/hbase/RangerAuthorizationCoprocessorImpl.java b/hbase-agent/src/main/java/org/apache/ranger/authorization/hbase/RangerAuthorizationCoprocessorImpl.java new file mode 100644 index 0000000..66b0dd0 --- /dev/null +++ b/hbase-agent/src/main/java/org/apache/ranger/authorization/hbase/RangerAuthorizationCoprocessorImpl.java @@ -0,0 +1,1393 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.apache.ranger.authorization.hbase; +import java.io.IOException; +import java.net.InetAddress; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Calendar; +import java.util.Collection; +import java.util.Collections; +import java.util.Date; +import java.util.GregorianCalendar; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.NavigableSet; +import java.util.Set; +import java.util.TimeZone; + +import org.apache.commons.collections.CollectionUtils; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.CoprocessorEnvironment; +import org.apache.hadoop.hbase.HColumnDescriptor; +import org.apache.hadoop.hbase.HRegionInfo; +import org.apache.hadoop.hbase.HTableDescriptor; +import org.apache.hadoop.hbase.NamespaceDescriptor; +import org.apache.hadoop.hbase.ServerName; +import org.apache.hadoop.hbase.TableName; +import org.apache.hadoop.hbase.client.Append; +import org.apache.hadoop.hbase.client.Delete; +import org.apache.hadoop.hbase.client.Durability; +import org.apache.hadoop.hbase.client.Get; +import org.apache.hadoop.hbase.client.Increment; +import org.apache.hadoop.hbase.client.Put; +import org.apache.hadoop.hbase.client.Result; +import org.apache.hadoop.hbase.client.Scan; +import org.apache.hadoop.hbase.coprocessor.CoprocessorException; +import org.apache.hadoop.hbase.coprocessor.CoprocessorService; +import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.ObserverContext; +import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment; +import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment; +import org.apache.hadoop.hbase.filter.ByteArrayComparable; +import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp; +import 
org.apache.hadoop.hbase.filter.Filter; +import org.apache.hadoop.hbase.filter.FilterList; +import org.apache.hadoop.hbase.ipc.RpcServer; +import org.apache.hadoop.hbase.protobuf.ProtobufUtil; +import org.apache.hadoop.hbase.protobuf.ResponseConverter; +import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos; +import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService; +import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription; +import org.apache.hadoop.hbase.protobuf.generated.QuotaProtos.Quotas; +import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.CleanupBulkLoadRequest; +import org.apache.hadoop.hbase.protobuf.generated.SecureBulkLoadProtos.PrepareBulkLoadRequest; +import org.apache.hadoop.hbase.regionserver.InternalScanner; +import org.apache.hadoop.hbase.regionserver.Region; +import org.apache.hadoop.hbase.regionserver.RegionScanner; +import org.apache.hadoop.hbase.regionserver.ScanType; +import org.apache.hadoop.hbase.regionserver.Store; +import org.apache.hadoop.hbase.regionserver.StoreFile; +import org.apache.hadoop.hbase.regionserver.wal.WALEdit; +import org.apache.hadoop.hbase.security.AccessDeniedException; +import org.apache.hadoop.hbase.security.User; +import org.apache.hadoop.hbase.security.access.Permission; +import org.apache.hadoop.hbase.security.access.Permission.Action; +import org.apache.hadoop.hbase.security.access.TablePermission; +import org.apache.hadoop.hbase.security.access.UserPermission; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; +import org.apache.hadoop.security.AccessControlException; +import org.apache.ranger.audit.model.AuthzAuditEvent; +import org.apache.ranger.authorization.hadoop.config.RangerConfiguration; +import org.apache.ranger.authorization.hadoop.constants.RangerHadoopConstants; +import org.apache.ranger.authorization.utils.StringUtil; +import org.apache.ranger.plugin.audit.RangerDefaultAuditHandler; +import org.apache.ranger.plugin.policyengine.RangerAccessRequest; +import org.apache.ranger.plugin.policyengine.RangerAccessResultProcessor; +import org.apache.ranger.plugin.service.RangerBasePlugin; +import org.apache.ranger.plugin.util.GrantRevokeRequest; + +import com.google.common.base.Objects; +import com.google.common.collect.Lists; +import com.google.common.collect.MapMaker; +import com.google.protobuf.RpcCallback; +import com.google.protobuf.RpcController; +import com.google.protobuf.Service; + +public class RangerAuthorizationCoprocessorImpl extends RangerAuthorizationCoprocessorBase implements AccessControlService.Interface, CoprocessorService { + private static final Log LOG = LogFactory.getLog(RangerAuthorizationCoprocessorImpl.class.getName()); + private static boolean UpdateRangerPoliciesOnGrantRevoke = RangerHadoopConstants.HBASE_UPDATE_RANGER_POLICIES_ON_GRANT_REVOKE_DEFAULT_VALUE; + private static final String GROUP_PREFIX = "@"; + + private static final String WILDCARD = "*"; + private static final String NAMESPACE_SEPARATOR = ":"; + + private static final TimeZone gmtTimeZone = TimeZone.getTimeZone("GMT+0"); + + private RegionCoprocessorEnvironment regionEnv; + private Map<InternalScanner, String> scannerOwners = new MapMaker().weakKeys().makeMap(); + + /* + * These are package level only for testability and aren't meant to be exposed outside via getters/setters or made available to derived classes. 
+ */ + final HbaseFactory _factory = HbaseFactory.getInstance(); + final HbaseUserUtils _userUtils = _factory.getUserUtils(); + final HbaseAuthUtils _authUtils = _factory.getAuthUtils(); + private static volatile RangerHBasePlugin hbasePlugin = null; + + // Utilities Methods + protected byte[] getTableName(RegionCoprocessorEnvironment e) { + Region region = e.getRegion(); + byte[] tableName = null; + if (region != null) { + HRegionInfo regionInfo = region.getRegionInfo(); + if (regionInfo != null) { + tableName = regionInfo.getTable().getName() ; + } + } + return tableName; + } + protected void requireSystemOrSuperUser(Configuration conf) throws IOException { + User user = User.getCurrent(); + if (user == null) { + throw new IOException("Unable to obtain the current user, authorization checks for internal operations will not work correctly!"); + } + String systemUser = user.getShortName(); + User activeUser = getActiveUser(); + if (!Objects.equal(systemUser, activeUser.getShortName()) && !_userUtils.isSuperUser(activeUser)) { + throw new AccessDeniedException("User '" + user.getShortName() + "' is not system or super user."); + } + } + protected boolean isSpecialTable(HRegionInfo regionInfo) { + return isSpecialTable(regionInfo.getTable().getName()); + } + protected boolean isSpecialTable(byte[] tableName) { + return isSpecialTable(Bytes.toString(tableName)); + } + protected boolean isSpecialTable(String input) { + final String[] specialTables = new String[] { "hbase:meta", "-ROOT-", ".META."}; + for (String specialTable : specialTables ) { + if (specialTable.equals(input)) { + return true; + } + } + + return false; + } + protected boolean isAccessForMetaTables(RegionCoprocessorEnvironment env) { + HRegionInfo hri = env.getRegion().getRegionInfo(); + + if (hri.isMetaTable() || hri.isMetaRegion()) { + return true; + } else { + return false; + } + } + + private User getActiveUser() { + User user = RpcServer.getRequestUser(); + if (user == null) { + // for non-rpc handling, fallback to system user + try { + user = User.getCurrent(); + } catch (IOException e) { + LOG.error("Unable to find the current user"); + user = null; + } + } + return user; + } + + private String getRemoteAddress() { + InetAddress remoteAddr = RpcServer.getRemoteAddress(); + + if(remoteAddr == null) { + remoteAddr = RpcServer.getRemoteIp(); + } + + String strAddr = remoteAddr != null ? remoteAddr.getHostAddress() : null; + + return strAddr; + } + + // Methods that are used within the CoProcessor + private void requireScannerOwner(InternalScanner s) throws AccessDeniedException { + if (!RpcServer.isInRpcCallContext()) { + return; + } + + String requestUserName = RpcServer.getRequestUserName(); + String owner = scannerOwners.get(s); + if (owner != null && !owner.equals(requestUserName)) { + throw new AccessDeniedException("User '"+ requestUserName +"' is not the scanner owner!"); + } + } + /** + * @param families + * @return empty map if families is null, would never have empty or null keys, would never have null values, values could be empty (non-null) set + */ + Map<String, Set<String>> getColumnFamilies(Map<byte[], ? extends Collection<?>> families) { + if (families == null) { + // null families map passed. Ok, returning empty map. + return Collections.<String, Set<String>>emptyMap(); + } + Map<String, Set<String>> result = new HashMap<String, Set<String>>(); + for (Map.Entry<byte[], ?
extends Collection<?>> anEntry : families.entrySet()) { + byte[] familyBytes = anEntry.getKey(); + String family = Bytes.toString(familyBytes); + if (family == null || family.isEmpty()) { + LOG.error("Unexpected Input: got null or empty column family (key) in families map! Ignoring..."); + } else { + Collection<?> columnCollection = anEntry.getValue(); + if (CollectionUtils.isEmpty(columnCollection)) { + // family points to null map, OK. + result.put(family, Collections.<String> emptySet()); + } else { + Iterator<String> columnIterator = new ColumnIterator(columnCollection); + Set<String> columns = new HashSet<String>(); + while (columnIterator.hasNext()) { + String column = columnIterator.next(); + columns.add(column); + } + result.put(family, columns); + } + } + } + return result; + } + + static class ColumnFamilyAccessResult { + final boolean _everythingIsAccessible; + final boolean _somethingIsAccessible; + final List<AuthzAuditEvent> _accessAllowedEvents; + final List<AuthzAuditEvent> _familyLevelAccessEvents; + final AuthzAuditEvent _accessDeniedEvent; + final String _denialReason; + final RangerAuthorizationFilter _filter; + + ColumnFamilyAccessResult(boolean everythingIsAccessible, boolean somethingIsAccessible, + List<AuthzAuditEvent> accessAllowedEvents, List<AuthzAuditEvent> familyLevelAccessEvents, AuthzAuditEvent accessDeniedEvent, String denialReason, + RangerAuthorizationFilter filter) { + _everythingIsAccessible = everythingIsAccessible; + _somethingIsAccessible = somethingIsAccessible; + // WARNING: we are just holding on to reference of the collection. Potentially risky optimization + _accessAllowedEvents = accessAllowedEvents; + _familyLevelAccessEvents = familyLevelAccessEvents; + _accessDeniedEvent = accessDeniedEvent; + _denialReason = denialReason; + // cached values of access results + _filter = filter; + } + + @Override + public String toString() { + return Objects.toStringHelper(getClass()) + .add("everythingIsAccessible", _everythingIsAccessible) + .add("somethingIsAccessible", _somethingIsAccessible) + .add("accessAllowedEvents", _accessAllowedEvents) + .add("familyLevelAccessEvents", _familyLevelAccessEvents) + .add("accessDeniedEvent", _accessDeniedEvent) + .add("denialReason", _denialReason) + .add("filter", _filter) + .toString(); + + } + } + + ColumnFamilyAccessResult evaluateAccess(String operation, Action action, final RegionCoprocessorEnvironment env, + final Map<byte[], ? extends Collection<?>> familyMap) throws AccessDeniedException { + + String access = _authUtils.getAccess(action); + User user = getActiveUser(); + String userName = _userUtils.getUserAsString(user); + + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("evaluateAccess: entered: user[%s], Operation[%s], access[%s], families[%s]", + userName, operation, access, getColumnFamilies(familyMap).toString())); + } + + byte[] tableBytes = getTableName(env); + if (tableBytes == null || tableBytes.length == 0) { + String message = "evaluateAccess: Unexpected: Couldn't get table from RegionCoprocessorEnvironment. 
Access denied, not audited"; + LOG.debug(message); + throw new AccessDeniedException("Insufficient permissions for operation '" + operation + "', action: " + action); + } + String table = Bytes.toString(tableBytes); + + final String messageTemplate = "evaluateAccess: exiting: user[%s], Operation[%s], access[%s], families[%s], verdict[%s]"; + ColumnFamilyAccessResult result; + if (canSkipAccessCheck(operation, access, table) || canSkipAccessCheck(operation, access, env)) { + LOG.debug("evaluateAccess: exiting: isKnownAccessPattern returned true: access allowed, not audited"); + result = new ColumnFamilyAccessResult(true, true, null, null, null, null, null); + if (LOG.isDebugEnabled()) { + Map<String, Set<String>> families = getColumnFamilies(familyMap); + String message = String.format(messageTemplate, userName, operation, access, families.toString(), result.toString()); + LOG.debug(message); + } + return result; + } + + // let's create a session that would be reused. Set things on it that won't change. + HbaseAuditHandler auditHandler = _factory.getAuditHandler(); + AuthorizationSession session = new AuthorizationSession(hbasePlugin) + .operation(operation) + .remoteAddress(getRemoteAddress()) + .auditHandler(auditHandler) + .user(user) + .access(access) + .table(table); + Map<String, Set<String>> families = getColumnFamilies(familyMap); + if (LOG.isDebugEnabled()) { + LOG.debug("evaluateAccess: families to process: " + families.toString()); + } + if (families == null || families.isEmpty()) { + LOG.debug("evaluateAccess: Null or empty families collection, ok. Table level access is desired"); + session.buildRequest() + .authorize(); + boolean authorized = session.isAuthorized(); + String reason = ""; + if (authorized) { + if (LOG.isDebugEnabled()) { + LOG.debug("evaluateAccess: table level access granted [" + table + "]"); + } + } else { + reason = String.format("Insufficient permissions for user '%s', action: %s, tableName: %s, no column families found.", user.getName(), operation, table); + } + AuthzAuditEvent event = auditHandler.getAndDiscardMostRecentEvent(); // this could be null, of course, depending on audit settings of table. + + // if authorized then pass captured events as access allowed set else as access denied set. + result = new ColumnFamilyAccessResult(authorized, authorized, + authorized ? Collections.singletonList(event) : null, + null, authorized ? null : event, reason, null); + if (LOG.isDebugEnabled()) { + String message = String.format(messageTemplate, userName, operation, access, families.toString(), result.toString()); + LOG.debug(message); + } + return result; + } else { + LOG.debug("evaluateAccess: Families collection not null. Skipping table-level check, will do finer level check"); + } + + boolean everythingIsAccessible = true; + boolean somethingIsAccessible = false; + /* + * we would have to accumulate audits of all successful accesses and any one denial (which in our case ends up being the last denial) + * We need to keep audit events for family level access check separate because we don't want them logged in some cases.
+ */ + List<AuthzAuditEvent> authorizedEvents = new ArrayList<AuthzAuditEvent>(); + List<AuthzAuditEvent> familyLevelAccessEvents = new ArrayList<AuthzAuditEvent>(); + AuthzAuditEvent deniedEvent = null; + String denialReason = null; + // we need to cache the auths results so that we can create a filter, if needed + Map<String, Set<String>> columnsAccessAllowed = new HashMap<String, Set<String>>(); + Set<String> familesAccessAllowed = new HashSet<String>(); + Set<String> familesAccessDenied = new HashSet<String>(); + Set<String> familesAccessIndeterminate = new HashSet<String>(); + + for (Map.Entry<String, Set<String>> anEntry : families.entrySet()) { + String family = anEntry.getKey(); + session.columnFamily(family); + if (LOG.isDebugEnabled()) { + LOG.debug("evaluateAccess: Processing family: " + family); + } + Set<String> columns = anEntry.getValue(); + if (columns == null || columns.isEmpty()) { + LOG.debug("evaluateAccess: columns collection null or empty, ok. Family level access is desired."); + session.column(null) // zap stale column from prior iteration of this loop, if any + .buildRequest() + .authorize(); + AuthzAuditEvent auditEvent = auditHandler.getAndDiscardMostRecentEvent(); // capture it only for success + if (session.isAuthorized()) { + if (LOG.isDebugEnabled()) { + LOG.debug("evaluateAccess: has family level access [" + family + "]"); + } + // we need to do 3 things: housekeeping, decide about audit events, building the results cache for filter + somethingIsAccessible = true; + familesAccessAllowed.add(family); + if (auditEvent != null) { + LOG.debug("evaluateAccess: adding to family-level-access-granted-event-set"); + familyLevelAccessEvents.add(auditEvent); + } + } else { + everythingIsAccessible = false; + if (LOG.isDebugEnabled()) { + LOG.debug("evaluateAccess: no family level access [" + family + "]. Checking if has partial access (of any type)..."); + } + + session.resourceMatchingScope(RangerAccessRequest.ResourceMatchingScope.SELF_OR_DESCENDANTS) + .buildRequest() + .authorize(); + auditEvent = auditHandler.getAndDiscardMostRecentEvent(); // capture it only for failure + if (session.isAuthorized()) { + if (LOG.isDebugEnabled()) { + LOG.debug("evaluateAccess: has partial access (of some type) in family [" + family + "]"); + } + // we need to do 3 things: housekeeping, decide about audit events, building the results cache for filter + somethingIsAccessible = true; + familesAccessIndeterminate.add(family); + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("evaluateAccess: has no access of ["+ access + "] type in family [" + family + "]"); + } + familesAccessDenied.add(family); + denialReason = String.format("Insufficient permissions for user '%s', action: %s, tableName: %s, family: %s.", user.getName(), operation, table, family); + if (auditEvent != null && deniedEvent == null) { // we need to capture just one denial event + LOG.debug("evaluateAccess: Setting denied access audit event with last auth failure audit event."); + deniedEvent = auditEvent; + } + } + // Restore the headMatch setting + session.resourceMatchingScope(RangerAccessRequest.ResourceMatchingScope.SELF); + } + } else { + LOG.debug("evaluateAccess: columns collection not empty.
Skipping Family level check, will do finer level access check."); + Set<String> accessibleColumns = new HashSet<String>(); // will be used to populate our results cache for the filter + for (String column : columns) { + if (LOG.isDebugEnabled()) { + LOG.debug("evaluateAccess: Processing column: " + column); + } + session.column(column) + .buildRequest() + .authorize(); + AuthzAuditEvent auditEvent = auditHandler.getAndDiscardMostRecentEvent(); + if (session.isAuthorized()) { + if (LOG.isDebugEnabled()) { + LOG.debug("evaluateAccess: has column level access [" + family + ", " + column + "]"); + } + // we need to do 3 things: housekeeping, capturing audit events, building the results cache for filter + somethingIsAccessible = true; + accessibleColumns.add(column); + if (auditEvent != null) { + LOG.debug("evaluateAccess: adding to access-granted-audit-event-set"); + authorizedEvents.add(auditEvent); + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("evaluateAccess: no column level access [" + family + ", " + column + "]"); + } + everythingIsAccessible = false; + denialReason = String.format("Insufficient permissions for user '%s', action: %s, tableName: %s, family: %s, column: %s", user.getName(), operation, table, family, column); + if (auditEvent != null && deniedEvent == null) { // we need to capture just one denial event + LOG.debug("evaluateAccess: Setting denied access audit event with last auth failure audit event."); + deniedEvent = auditEvent; + } + } + if (!accessibleColumns.isEmpty()) { + columnsAccessAllowed.put(family, accessibleColumns); + } + } + } + } + // Cached auth results are encapsulated in the filter. Not every caller of the function uses it - only preGetOp and preScannerOpen do. + RangerAuthorizationFilter filter = new RangerAuthorizationFilter(session, familesAccessAllowed, familesAccessDenied, familesAccessIndeterminate, columnsAccessAllowed); + result = new ColumnFamilyAccessResult(everythingIsAccessible, somethingIsAccessible, authorizedEvents, familyLevelAccessEvents, deniedEvent, denialReason, filter); + if (LOG.isDebugEnabled()) { + String message = String.format(messageTemplate, userName, operation, access, families.toString(), result.toString()); + LOG.debug(message); + } + return result; + } + + Filter authorizeAccess(String operation, Action action, final RegionCoprocessorEnvironment env, final Map<byte[], NavigableSet<byte[]>> familyMap) throws AccessDeniedException { + + if (LOG.isDebugEnabled()) { + LOG.debug("==> authorizeAccess"); + } + try { + ColumnFamilyAccessResult accessResult = evaluateAccess(operation, action, env, familyMap); + RangerDefaultAuditHandler auditHandler = new RangerDefaultAuditHandler(); + if (accessResult._everythingIsAccessible) { + auditHandler.logAuthzAudits(accessResult._accessAllowedEvents); + auditHandler.logAuthzAudits(accessResult._familyLevelAccessEvents); + LOG.debug("authorizeAccess: exiting: No filter returned since all access was allowed"); + return null; // no filter needed since we are good to go. + } else if (accessResult._somethingIsAccessible) { + // NOTE: audit logging is split between logging here (in scope of preOp/preGet) and logging in the filter component for those that couldn't be determined + auditHandler.logAuthzAudits(accessResult._accessAllowedEvents); + LOG.debug("authorizeAccess: exiting: Filter returned since some access was allowed"); + return accessResult._filter; + } else { + // If we are here then it means nothing was accessible!
So let's log one denial (in our case, the last denial) and throw an exception + auditHandler.logAuthzAudit(accessResult._accessDeniedEvent); + LOG.debug("authorizeAccess: exiting: Throwing exception since nothing was accessible"); + throw new AccessDeniedException(accessResult._denialReason); + } + } finally { + if (LOG.isDebugEnabled()) { + LOG.debug("<== authorizeAccess"); + } + } + } + + Filter combineFilters(Filter filter, Filter existingFilter) { + Filter combinedFilter = filter; + if (existingFilter != null) { + combinedFilter = new FilterList(FilterList.Operator.MUST_PASS_ALL, Lists.newArrayList(filter, existingFilter)); + } + return combinedFilter; + } + + void requirePermission(final String operation, final Action action, final RegionCoprocessorEnvironment regionServerEnv, final Map<byte[], ? extends Collection<?>> familyMap) + throws AccessDeniedException { + + ColumnFamilyAccessResult accessResult = evaluateAccess(operation, action, regionServerEnv, familyMap); + RangerDefaultAuditHandler auditHandler = new RangerDefaultAuditHandler(); + if (accessResult._everythingIsAccessible) { + auditHandler.logAuthzAudits(accessResult._accessAllowedEvents); + auditHandler.logAuthzAudits(accessResult._familyLevelAccessEvents); + LOG.debug("requirePermission: exiting: all access was allowed"); + return; + } else { + auditHandler.logAuthzAudit(accessResult._accessDeniedEvent); + LOG.debug("requirePermission: exiting: throwing exception as everything wasn't accessible"); + throw new AccessDeniedException(accessResult._denialReason); + } + } + + /** + * Authorizes access to a single table/columnFamily/column resource and publishes the audit result. + * @param operation + * @param otherInformation + * @param table + * @param columnFamily + * @param column + * @throws AccessDeniedException + */ + void authorizeAccess(String operation, String otherInformation, Action action, String table, String columnFamily, String column) throws AccessDeniedException { + + String access = _authUtils.getAccess(action); + if (LOG.isDebugEnabled()) { + final String format = "authorizeAccess: %s: Operation[%s], Info[%s], access[%s], table[%s], columnFamily[%s], column[%s]"; + String message = String.format(format, "Entering", operation, otherInformation, access, table, columnFamily, column); + LOG.debug(message); + } + + final String format = "authorizeAccess: %s: Operation[%s], Info[%s], access[%s], table[%s], columnFamily[%s], column[%s], allowed[%s], reason[%s]"; + if (canSkipAccessCheck(operation, access, table)) { + if (LOG.isDebugEnabled()) { + String message = String.format(format, "Exiting", operation, otherInformation, access, table, columnFamily, column, true, "can skip auth check"); + LOG.debug(message); + } + return; + } + User user = getActiveUser(); + + HbaseAuditHandler auditHandler = _factory.getAuditHandler(); + AuthorizationSession session = new AuthorizationSession(hbasePlugin) + .operation(operation) + .otherInformation(otherInformation) + .remoteAddress(getRemoteAddress()) + .auditHandler(auditHandler) + .user(user) + .access(access) + .table(table) + .columnFamily(columnFamily) + .column(column) + .buildRequest() + .authorize(); + + if (LOG.isDebugEnabled()) { + boolean allowed = session.isAuthorized(); + String reason = session.getDenialReason(); + String message = String.format(format, "Exiting", operation, otherInformation, access, table, columnFamily, column, allowed, reason); + LOG.debug(message); + } + + session.publishResults(); + } + + boolean canSkipAccessCheck(final String operation, String access, final String table) + throws AccessDeniedException { +
+ User user = getActiveUser(); + boolean result = false; + if (user == null) { + String message = "Unexpected: User is null: access denied, not audited!"; + LOG.warn("canSkipAccessCheck: exiting: " + message); + throw new AccessDeniedException("No user associated with request (" + operation + ") for action: " + access + " on table: " + table); + } else if (isAccessForMetadataRead(access, table)) { + LOG.debug("canSkipAccessCheck: true: metadata read access always allowed, not audited"); + result = true; + } else { + LOG.debug("Can't skip access checks"); + } + + return result; + } + + boolean canSkipAccessCheck(final String operation, String access, final RegionCoprocessorEnvironment regionServerEnv) throws AccessDeniedException { + + User user = getActiveUser(); + // read access to metadata tables is always allowed and isn't audited. + if (isAccessForMetaTables(regionServerEnv) && _authUtils.isReadAccess(access)) { + LOG.debug("isKnownAccessPattern: exiting: Read access for metadata tables allowed, not audited!"); + return true; + } + // if write access is desired to metatables then global create access is sufficient + if (_authUtils.isWriteAccess(access) && isAccessForMetaTables(regionServerEnv)) { + String createAccess = _authUtils.getAccess(Action.CREATE); + AuthorizationSession session = new AuthorizationSession(hbasePlugin) + .operation(operation) + .remoteAddress(getRemoteAddress()) + .user(user) + .access(createAccess) + .buildRequest() + .authorize(); + if (session.isAuthorized()) { + // NOTE: this access isn't logged + LOG.debug("isKnownAccessPattern: exiting: User has global create access, allowed!"); + return true; + } + } + return false; + } + + boolean isAccessForMetadataRead(String access, String table) { + if (_authUtils.isReadAccess(access) && isSpecialTable(table)) { + LOG.debug("isAccessForMetadataRead: Metadata tables read: access allowed!"); + return true; + } + return false; + } + + // Check if the user has global permission ...
+ protected void requireGlobalPermission(String request, String objName, Permission.Action action) throws AccessDeniedException { + authorizeAccess(request, objName, action, null, null, null); + } + + protected void requirePermission(String request, byte[] tableName, Permission.Action action) throws AccessDeniedException { + String table = Bytes.toString(tableName); + + authorizeAccess(request, null, action, table, null, null); + } + + protected void requirePermission(String request, byte[] aTableName, byte[] aColumnFamily, byte[] aQualifier, Permission.Action action) throws AccessDeniedException { + + String table = Bytes.toString(aTableName); + String columnFamily = Bytes.toString(aColumnFamily); + String column = Bytes.toString(aQualifier); + + authorizeAccess(request, null, action, table, columnFamily, column); + } + + protected void requirePermission(String request, Permission.Action perm, RegionCoprocessorEnvironment env, Collection<byte[]> families) throws IOException { + HashMap<byte[], Set<byte[]>> familyMap = new HashMap<byte[], Set<byte[]>>(); + + if(families != null) { + for (byte[] family : families) { + familyMap.put(family, null); + } + } + requirePermission(request, perm, env, familyMap); + } + + @Override + public void postScannerClose(ObserverContext<RegionCoprocessorEnvironment> c, InternalScanner s) throws IOException { + scannerOwners.remove(s); + } + @Override + public RegionScanner postScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException { + User user = getActiveUser(); + if (user != null && user.getShortName() != null) { + scannerOwners.put(s, user.getShortName()); + } + return s; + } + + @Override + public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> ctx) throws IOException { + //if(UpdateRangerPoliciesOnGrantRevoke) { + //AccessControlLists.createACLTable(ctx.getEnvironment().getMasterServices()); + //RangerAccessControlLists.init(ctx.getEnvironment().getMasterServices()); + //} + } + + @Override + public void preAddColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, HColumnDescriptor column) throws IOException { + requirePermission("addColumn", tableName.getName(), null, null, Action.CREATE); + } + @Override + public Result preAppend(ObserverContext<RegionCoprocessorEnvironment> c, Append append) throws IOException { + requirePermission("append", TablePermission.Action.WRITE, c.getEnvironment(), append.getFamilyCellMap()); + return null; + } + @Override + public void preAssign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo) throws IOException { + requirePermission("assign", regionInfo.getTable().getName(), null, null, Action.ADMIN); + } + @Override + public void preBalance(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException { + requirePermission("balance", null, Permission.Action.ADMIN); + } + @Override + public boolean preBalanceSwitch(ObserverContext<MasterCoprocessorEnvironment> c, boolean newValue) throws IOException { + requirePermission("balanceSwitch", null, Permission.Action.ADMIN); + return newValue; + } + @Override + public void preBulkLoadHFile(ObserverContext<RegionCoprocessorEnvironment> ctx, List<Pair<byte[], String>> familyPaths) throws IOException { + List<byte[]> cfs = new LinkedList<byte[]>(); + for (Pair<byte[], String> el : familyPaths) { + cfs.add(el.getFirst()); + } + requirePermission("bulkLoadHFile", Permission.Action.WRITE, ctx.getEnvironment(), cfs); + } + @Override + public boolean 
preCheckAndDelete(ObserverContext<RegionCoprocessorEnvironment> c, byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, ByteArrayComparable comparator, Delete delete, boolean result) throws IOException { + Collection<byte[]> familyMap = Arrays.asList(new byte[][] { family }); + requirePermission("checkAndDelete", TablePermission.Action.READ, c.getEnvironment(), familyMap); + requirePermission("checkAndDelete", TablePermission.Action.WRITE, c.getEnvironment(), familyMap); + return result; + } + @Override + public boolean preCheckAndPut(ObserverContext<RegionCoprocessorEnvironment> c, byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp, ByteArrayComparable comparator, Put put, boolean result) throws IOException { + Collection<byte[]> familyMap = Arrays.asList(new byte[][] { family }); + requirePermission("checkAndPut", TablePermission.Action.READ, c.getEnvironment(), familyMap); + requirePermission("checkAndPut", TablePermission.Action.WRITE, c.getEnvironment(), familyMap); + return result; + } + @Override + public void preCloneSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException { + requirePermission("cloneSnapshot", null, Permission.Action.ADMIN); + } + @Override + public void preClose(ObserverContext<RegionCoprocessorEnvironment> e, boolean abortRequested) throws IOException { + requirePermission("close", getTableName(e.getEnvironment()), Permission.Action.ADMIN); + } + @Override + public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> e, Store store, InternalScanner scanner,ScanType scanType) throws IOException { + requirePermission("compact", getTableName(e.getEnvironment()), null, null, Action.CREATE); + return scanner; + } + @Override + public void preCompactSelection(ObserverContext<RegionCoprocessorEnvironment> e, Store store, List<StoreFile> candidates) throws IOException { + requirePermission("compactSelection", getTableName(e.getEnvironment()), null, null, Action.CREATE); + } + + @Override + public void preCreateTable(ObserverContext<MasterCoprocessorEnvironment> c, HTableDescriptor desc, HRegionInfo[] regions) throws IOException { + requirePermission("createTable", desc.getName(), Permission.Action.CREATE); + } + @Override + public void preDelete(ObserverContext<RegionCoprocessorEnvironment> c, Delete delete, WALEdit edit, Durability durability) throws IOException { + requirePermission("delete", TablePermission.Action.WRITE, c.getEnvironment(), delete.getFamilyCellMap()); + } + @Override + public void preDeleteColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, byte[] col) throws IOException { + requirePermission("deleteColumn", tableName.getName(), null, null, Action.CREATE); + } + @Override + public void preDeleteSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot) throws IOException { + requirePermission("deleteSnapshot", null, Permission.Action.ADMIN); + } + @Override + public void preDeleteTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName) throws IOException { + requirePermission("deleteTable", tableName.getName(), null, null, Action.CREATE); + } + @Override + public void preDisableTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName) throws IOException { + requirePermission("disableTable", tableName.getName(), null, null, Action.CREATE); + } + @Override + public void preEnableTable(ObserverContext<MasterCoprocessorEnvironment> 
c, TableName tableName) throws IOException { + requirePermission("enableTable", tableName.getName(), null, null, Action.CREATE); + } + @Override + public boolean preExists(ObserverContext<RegionCoprocessorEnvironment> c, Get get, boolean exists) throws IOException { + requirePermission("exists", TablePermission.Action.READ, c.getEnvironment(), get.familySet()); + return exists; + } + @Override + public void preFlush(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException { + requirePermission("flush", getTableName(e.getEnvironment()), null, null, Action.CREATE); + } + @Override + public void preGetClosestRowBefore(ObserverContext<RegionCoprocessorEnvironment> c, byte[] row, byte[] family, Result result) throws IOException { + requirePermission("getClosestRowBefore", TablePermission.Action.READ, c.getEnvironment(), (family != null ? Lists.newArrayList(family) : null)); + } + @Override + public Result preIncrement(ObserverContext<RegionCoprocessorEnvironment> c, Increment increment) throws IOException { + requirePermission("increment", TablePermission.Action.WRITE, c.getEnvironment(), increment.getFamilyCellMap().keySet()); + + return null; + } + @Override + public long preIncrementColumnValue(ObserverContext<RegionCoprocessorEnvironment> c, byte[] row, byte[] family, byte[] qualifier, long amount, boolean writeToWAL) throws IOException { + requirePermission("incrementColumnValue", TablePermission.Action.READ, c.getEnvironment(), Arrays.asList(new byte[][] { family })); + requirePermission("incrementColumnValue", TablePermission.Action.WRITE, c.getEnvironment(), Arrays.asList(new byte[][] { family })); + return -1; + } + @Override + public void preModifyColumn(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, HColumnDescriptor descriptor) throws IOException { + requirePermission("modifyColumn", tableName.getName(), null, null, Action.CREATE); + } + @Override + public void preModifyTable(ObserverContext<MasterCoprocessorEnvironment> c, TableName tableName, HTableDescriptor htd) throws IOException { + requirePermission("modifyTable", tableName.getName(), null, null, Action.CREATE); + } + @Override + public void preMove(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo region, ServerName srcServer, ServerName destServer) throws IOException { + requirePermission("move", region.getTable().getName() , null, null, Action.ADMIN); + } + @Override + public void preOpen(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException { + RegionCoprocessorEnvironment env = e.getEnvironment(); + final Region region = env.getRegion(); + if (region == null) { + LOG.error("NULL region from RegionCoprocessorEnvironment in preOpen()"); + } else { + HRegionInfo regionInfo = region.getRegionInfo(); + if (isSpecialTable(regionInfo)) { + requireSystemOrSuperUser(regionEnv.getConfiguration()); + } else { + requirePermission("open", getTableName(e.getEnvironment()), Action.ADMIN); + } + } + } + @Override + public void preRestoreSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException { + requirePermission("restoreSnapshot", hTableDescriptor.getName(), Permission.Action.ADMIN); + } + + @Override + public void preScannerClose(ObserverContext<RegionCoprocessorEnvironment> c, InternalScanner s) throws IOException { + requireScannerOwner(s); + } + @Override + public boolean preScannerNext(ObserverContext<RegionCoprocessorEnvironment> c, InternalScanner s, List<Result> result, int limit, 
boolean hasNext) throws IOException { + requireScannerOwner(s); + return hasNext; + } + @Override + public RegionScanner preScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c, Scan scan, RegionScanner s) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("==> preScannerOpen"); + } + + try { + RegionCoprocessorEnvironment e = c.getEnvironment(); + + Map<byte[], NavigableSet<byte[]>> familyMap = scan.getFamilyMap(); + String operation = "scannerOpen"; + Filter filter = authorizeAccess(operation, Action.READ, e, familyMap); + if (filter == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("preScannerOpen: Access allowed for all families/column. No filter added"); + } + } else { + if (LOG.isDebugEnabled()) { + LOG.debug("preScannerOpen: Access allowed for some of the families/column. New filter added."); + } + Filter existingFilter = scan.getFilter(); + Filter combinedFilter = combineFilters(filter, existingFilter); + scan.setFilter(combinedFilter); + } + return s; + } finally { + if (LOG.isDebugEnabled()) { + LOG.debug("<== preScannerOpen"); + } + } + } + @Override + public void preShutdown(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException { + requirePermission("shutdown", null, Permission.Action.ADMIN); + } + @Override + public void preSnapshot(ObserverContext<MasterCoprocessorEnvironment> ctx, SnapshotDescription snapshot, HTableDescriptor hTableDescriptor) throws IOException { + requirePermission("snapshot", hTableDescriptor.getName(), Permission.Action.ADMIN); + } + @Override + public void preSplit(ObserverContext<RegionCoprocessorEnvironment> e) throws IOException { + requirePermission("split", getTableName(e.getEnvironment()), null, null, Action.ADMIN); + } + @Override + public void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c) throws IOException { + requirePermission("stopMaster", null, Permission.Action.ADMIN); + } + @Override + public void preStopRegionServer(ObserverContext<RegionServerCoprocessorEnvironment> env) throws IOException { + requirePermission("stop", null, Permission.Action.ADMIN); + } + @Override + public void preUnassign(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo, boolean force) throws IOException { + requirePermission("unassign", regionInfo.getTable().getName(), null, null, Action.ADMIN); + } + + @Override + public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx, + final String userName, final Quotas quotas) throws IOException { + requireGlobalPermission("setUserQuota", null, Action.ADMIN); + } + + @Override + public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx, + final String userName, final TableName tableName, final Quotas quotas) throws IOException { + requirePermission("setUserTableQuota", tableName.getName(), null, null, Action.ADMIN); + } + + @Override + public void preSetUserQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx, + final String userName, final String namespace, final Quotas quotas) throws IOException { + requireGlobalPermission("setUserNamespaceQuota", namespace, Action.ADMIN); + } + + @Override + public void preSetTableQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx, + final TableName tableName, final Quotas quotas) throws IOException { + requirePermission("setTableQuota", tableName.getName(), null, null, Action.ADMIN); + } + + @Override + public void preSetNamespaceQuota(final ObserverContext<MasterCoprocessorEnvironment> ctx, + final String namespace, final Quotas quotas) 
throws IOException { + requireGlobalPermission("setNamespaceQuota", namespace, Action.ADMIN); + } + + private String coprocessorType = "unknown"; + private static final String MASTER_COPROCESSOR_TYPE = "master"; + private static final String REGIONAL_COPROCESSOR_TYPE = "regional"; + private static final String REGIONAL_SERVER_COPROCESSOR_TYPE = "regionalServer"; + + @Override + public void start(CoprocessorEnvironment env) throws IOException { + String appType = "unknown"; + + if (env instanceof MasterCoprocessorEnvironment) { + coprocessorType = MASTER_COPROCESSOR_TYPE; + appType = "hbaseMaster"; + } else if (env instanceof RegionServerCoprocessorEnvironment) { + coprocessorType = REGIONAL_SERVER_COPROCESSOR_TYPE; + appType = "hbaseRegional"; + } else if (env instanceof RegionCoprocessorEnvironment) { + regionEnv = (RegionCoprocessorEnvironment) env; + coprocessorType = REGIONAL_COPROCESSOR_TYPE; + appType = "hbaseRegional"; + } + + Configuration conf = env.getConfiguration(); + HbaseFactory.initialize(conf); + + // create and initialize the plugin class + RangerHBasePlugin plugin = hbasePlugin; + + if(plugin == null) { + synchronized(RangerAuthorizationCoprocessorImpl.class) { + plugin = hbasePlugin; + + if(plugin == null) { + plugin = new RangerHBasePlugin(appType); + + plugin.init(); + + UpdateRangerPoliciesOnGrantRevoke = RangerConfiguration.getInstance().getBoolean(RangerHadoopConstants.HBASE_UPDATE_RANGER_POLICIES_ON_GRANT_REVOKE_PROP, RangerHadoopConstants.HBASE_UPDATE_RANGER_POLICIES_ON_GRANT_REVOKE_DEFAULT_VALUE); + + hbasePlugin = plugin; + } + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug("Start of Coprocessor: [" + coprocessorType + "]"); + } + } + @Override + public void prePut(ObserverContext<RegionCoprocessorEnvironment> c, Put put, WALEdit edit, Durability durability) throws IOException { + requirePermission("put", TablePermission.Action.WRITE, c.getEnvironment(), put.getFamilyCellMap()); + } + + @Override + public void preGetOp(final ObserverContext<RegionCoprocessorEnvironment> rEnv, final Get get, final List<Cell> result) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug("==> preGetOp"); + } + try { + RegionCoprocessorEnvironment e = rEnv.getEnvironment(); + Map<byte[], NavigableSet<byte[]>> familyMap = get.getFamilyMap(); + + String operation = "get"; + Filter filter = authorizeAccess(operation, Action.READ, e, familyMap); + if (filter == null) { + if (LOG.isDebugEnabled()) { + LOG.debug("preGetOp: all access allowed, no filter returned"); + } + } else { + Filter existingFilter = get.getFilter(); + Filter combinedFilter = combineFilters(filter, existingFilter); + get.setFilter(combinedFilter); + if (LOG.isDebugEnabled()) { + LOG.debug("preGetOp: partial access, new filter added"); + } + } + } finally { + if (LOG.isDebugEnabled()) { + LOG.debug("<== preGetOp"); + } + } + } + @Override + public void preRegionOffline(ObserverContext<MasterCoprocessorEnvironment> c, HRegionInfo regionInfo) throws IOException { + requirePermission("regionOffline", regionInfo.getTable().getName(), null, null, Action.ADMIN); + } + @Override + public void preCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, NamespaceDescriptor ns) throws IOException { + requireGlobalPermission("createNamespace", ns.getName(), Action.ADMIN); + } + @Override + public void preDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, String namespace) throws IOException { + requireGlobalPermission("deleteNamespace", namespace, Action.ADMIN); + } + @Override + public void 
preModifyNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx, NamespaceDescriptor ns) throws IOException { + requireGlobalPermission("modifyNamespace", ns.getName(), Action.ADMIN); + } + + @Override + public void postGetTableDescriptors(ObserverContext<MasterCoprocessorEnvironment> ctx, List<TableName> tableNamesList, List<HTableDescriptor> descriptors, String regex) throws IOException { + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("==> postGetTableDescriptors(count(tableNamesList)=%s, count(descriptors)=%s, regex=%s)", tableNamesList == null ? 0 : tableNamesList.size(), + descriptors == null ? 0 : descriptors.size(), regex)); + } + + if (CollectionUtils.isNotEmpty(descriptors)) { + // Retain only those that pass authorization checks + User user = getActiveUser(); + String access = _authUtils.getAccess(Action.CREATE); + HbaseAuditHandler auditHandler = _factory.getAuditHandler(); // this will accumulate audits for all tables that succeed. + AuthorizationSession session = new AuthorizationSession(hbasePlugin) + .operation("getTableDescriptors") + .otherInformation("regex=" + regex) + .remoteAddress(getRemoteAddress()) + .auditHandler(auditHandler) + .user(user) + .access(access); + + Iterator<HTableDescriptor> itr = descriptors.iterator(); + while (itr.hasNext()) { + HTableDescriptor htd = itr.next(); + String tableName = htd.getTableName().getNameAsString(); + session.table(tableName).buildRequest().authorize(); + if (!session.isAuthorized()) { + itr.remove(); + auditHandler.getAndDiscardMostRecentEvent(); + } + } + if (descriptors.size() > 0) { + session.logCapturedEvents(); + } + } + + if (LOG.isDebugEnabled()) { + LOG.debug(String.format("<== postGetTableDescriptors(count(tableNamesList)=%s, count(descriptors)=%s, regex=%s)", tableNamesList == null ? 0 : tableNamesList.size(), + descriptors == null ?
0 : descriptors.size(), regex)); + } + } + + @Override + public void preMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx, Region regionA, Region regionB) throws IOException { + requirePermission("mergeRegions", regionA.getTableDesc().getTableName().getName(), null, null, Action.ADMIN); + } + + public void prePrepareBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx, PrepareBulkLoadRequest request) throws IOException { + List<byte[]> cfs = null; + + requirePermission("prePrepareBulkLoad", Permission.Action.WRITE, ctx.getEnvironment(), cfs); + } + + public void preCleanupBulkLoad(ObserverContext<RegionCoprocessorEnvironment> ctx, CleanupBulkLoadRequest request) throws IOException { + List<byte[]> cfs = null; + + requirePermission("preCleanupBulkLoad", Permission.Action.WRITE, ctx.getEnvironment(), cfs); + } + + public static Date getUTCDate() { + Calendar local=Calendar.getInstance(); + int offset = local.getTimeZone().getOffset(local.getTimeInMillis()); + GregorianCalendar utc = new GregorianCalendar(gmtTimeZone); + utc.setTimeInMillis(local.getTimeInMillis()); + utc.add(Calendar.MILLISECOND, -offset); + return utc.getTime(); + } + + @Override + public void grant(RpcController controller, AccessControlProtos.GrantRequest request, RpcCallback<AccessControlProtos.GrantResponse> done) { + boolean isSuccess = false; + + if(UpdateRangerPoliciesOnGrantRevoke) { + GrantRevokeRequest grData = null; + + try { + grData = createGrantData(request); + + RangerHBasePlugin plugin = hbasePlugin; + + if(plugin != null) { + RangerAccessResultProcessor auditHandler = new RangerDefaultAuditHandler(); + + plugin.grantAccess(grData, auditHandler); + + isSuccess = true; + } + } catch(AccessControlException excp) { + LOG.warn("grant() failed", excp); + + ResponseConverter.setControllerException(controller, new AccessDeniedException(excp)); + } catch(IOException excp) { + LOG.warn("grant() failed", excp); + + ResponseConverter.setControllerException(controller, excp); + } catch (Exception excp) { + LOG.warn("grant() failed", excp); + + ResponseConverter.setControllerException(controller, new CoprocessorException(excp.getMessage())); + } + } + + AccessControlProtos.GrantResponse response = isSuccess ? AccessControlProtos.GrantResponse.getDefaultInstance() : null; + + done.run(response); + } + + @Override + public void revoke(RpcController controller, AccessControlProtos.RevokeRequest request, RpcCallback<AccessControlProtos.RevokeResponse> done) { + boolean isSuccess = false; + + if(UpdateRangerPoliciesOnGrantRevoke) { + GrantRevokeRequest grData = null; + + try { + grData = createRevokeData(request); + + RangerHBasePlugin plugin = hbasePlugin; + + if(plugin != null) { + RangerAccessResultProcessor auditHandler = new RangerDefaultAuditHandler(); + + plugin.revokeAccess(grData, auditHandler); + + isSuccess = true; + } + } catch(AccessControlException excp) { + LOG.warn("revoke() failed", excp); + + ResponseConverter.setControllerException(controller, new AccessDeniedException(excp)); + } catch(IOException excp) { + LOG.warn("revoke() failed", excp); + + ResponseConverter.setControllerException(controller, excp); + } catch (Exception excp) { + LOG.warn("revoke() failed", excp); + + ResponseConverter.setControllerException(controller, new CoprocessorException(excp.getMessage())); + } + } + + AccessControlProtos.RevokeResponse response = isSuccess ? 
AccessControlProtos.RevokeResponse.getDefaultInstance() : null; + + done.run(response); + } + + @Override + public void checkPermissions(RpcController controller, AccessControlProtos.CheckPermissionsRequest request, RpcCallback<AccessControlProtos.CheckPermissionsResponse> done) { + LOG.debug("checkPermissions(): "); + } + + @Override + public void getUserPermissions(RpcController controller, AccessControlProtos.GetUserPermissionsRequest request, RpcCallback<AccessControlProtos.GetUserPermissionsResponse> done) { + LOG.debug("getUserPermissions(): "); + } + + @Override + public Service getService() { + return AccessControlProtos.AccessControlService.newReflectiveService(this); + } + + private GrantRevokeRequest createGrantData(AccessControlProtos.GrantRequest request) throws Exception { + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermission up = request.getUserPermission(); + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission perm = up == null ? null : up.getPermission(); + + UserPermission userPerm = up == null ? null : ProtobufUtil.toUserPermission(up); + Permission.Action[] actions = userPerm == null ? null : userPerm.getActions(); + String userName = userPerm == null ? null : Bytes.toString(userPerm.getUser()); + String nameSpace = null; + String tableName = null; + String colFamily = null; + String qualifier = null; + + if(perm == null) { + throw new Exception("grant(): invalid data - permission is null"); + } + + if(StringUtil.isEmpty(userName)) { + throw new Exception("grant(): invalid data - username empty"); + } + + if ((actions == null) || (actions.length == 0)) { + throw new Exception("grant(): invalid data - no action specified"); + } + + switch(perm.getType()) { + case Global: + tableName = colFamily = qualifier = WILDCARD; + break; + + case Table: + tableName = Bytes.toString(userPerm.getTableName().getName()); + colFamily = Bytes.toString(userPerm.getFamily()); + qualifier = Bytes.toString(userPerm.getQualifier()); + break; + + case Namespace: + nameSpace = userPerm.getNamespace(); + break; + } + + if(StringUtil.isEmpty(nameSpace) && StringUtil.isEmpty(tableName) && StringUtil.isEmpty(colFamily) && StringUtil.isEmpty(qualifier)) { + throw new Exception("grant(): namespace/table/columnFamily/columnQualifier not specified"); + } + + tableName = StringUtil.isEmpty(tableName) ? WILDCARD : tableName; + colFamily = StringUtil.isEmpty(colFamily) ? WILDCARD : colFamily; + qualifier = StringUtil.isEmpty(qualifier) ? WILDCARD : qualifier; + + if(! StringUtil.isEmpty(nameSpace)) { + tableName = nameSpace + NAMESPACE_SEPARATOR + tableName; + } + + User activeUser = getActiveUser(); + String grantor = activeUser != null ? 
activeUser.getShortName() : null; + + Map<String, String> mapResource = new HashMap<String, String>(); + mapResource.put("table", tableName); + mapResource.put("column-family", colFamily); + mapResource.put("column", qualifier); + + GrantRevokeRequest ret = new GrantRevokeRequest(); + + ret.setGrantor(grantor); + ret.setDelegateAdmin(Boolean.FALSE); + ret.setEnableAudit(Boolean.TRUE); + ret.setReplaceExistingPermissions(Boolean.TRUE); + ret.setResource(mapResource); + ret.setClientIPAddress(getRemoteAddress()); + + if(userName.startsWith(GROUP_PREFIX)) { + ret.getGroups().add(userName.substring(GROUP_PREFIX.length())); + } else { + ret.getUsers().add(userName); + } + + for (int i = 0; i < actions.length; i++) { + switch(actions[i].code()) { + case 'R': + ret.getAccessTypes().add(HbaseAuthUtils.ACCESS_TYPE_READ); + break; + + case 'W': + ret.getAccessTypes().add(HbaseAuthUtils.ACCESS_TYPE_WRITE); + break; + + case 'C': + ret.getAccessTypes().add(HbaseAuthUtils.ACCESS_TYPE_CREATE); + break; + + case 'A': + ret.getAccessTypes().add(HbaseAuthUtils.ACCESS_TYPE_ADMIN); + ret.setDelegateAdmin(Boolean.TRUE); + break; + + default: + LOG.warn("grant(): ignoring action '" + actions[i].name() + "' for user '" + userName + "'"); + } + } + + return ret; + } + + private GrantRevokeRequest createRevokeData(AccessControlProtos.RevokeRequest request) throws Exception { + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.UserPermission up = request.getUserPermission(); + org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.Permission perm = up == null ? null : up.getPermission(); + + UserPermission userPerm = up == null ? null : ProtobufUtil.toUserPermission(up); + String userName = userPerm == null ? null : Bytes.toString(userPerm.getUser()); + String nameSpace = null; + String tableName = null; + String colFamily = null; + String qualifier = null; + + if(perm == null) { + throw new Exception("revoke(): invalid data - permission is null"); + } + + if(StringUtil.isEmpty(userName)) { + throw new Exception("revoke(): invalid data - username empty"); + } + + switch(perm.getType()) { + case Global : + tableName = colFamily = qualifier = WILDCARD; + break; + + case Table : + tableName = Bytes.toString(userPerm.getTableName().getName()); + colFamily = Bytes.toString(userPerm.getFamily()); + qualifier = Bytes.toString(userPerm.getQualifier()); + break; + + case Namespace: + nameSpace = userPerm.getNamespace(); + break; + } + + if(StringUtil.isEmpty(nameSpace) && StringUtil.isEmpty(tableName) && StringUtil.isEmpty(colFamily) && StringUtil.isEmpty(qualifier)) { + throw new Exception("revoke(): table/columnFamily/columnQualifier not specified"); + } + + tableName = StringUtil.isEmpty(tableName) ? WILDCARD : tableName; + colFamily = StringUtil.isEmpty(colFamily) ? WILDCARD : colFamily; + qualifier = StringUtil.isEmpty(qualifier) ? WILDCARD : qualifier; + + if(! StringUtil.isEmpty(nameSpace)) { + tableName = nameSpace + NAMESPACE_SEPARATOR + tableName; + } + + User activeUser = getActiveUser(); + String grantor = activeUser != null ? 
activeUser.getShortName() : null; + + Map<String, String> mapResource = new HashMap<String, String>(); + mapResource.put("table", tableName); + mapResource.put("column-family", colFamily); + mapResource.put("column", qualifier); + + GrantRevokeRequest ret = new GrantRevokeRequest(); + + ret.setGrantor(grantor); + ret.setDelegateAdmin(Boolean.TRUE); // remove delegateAdmin privilege as well + ret.setEnableAudit(Boolean.TRUE); + ret.setReplaceExistingPermissions(Boolean.TRUE); + ret.setResource(mapResource); + ret.setClientIPAddress(getRemoteAddress()); + + if(userName.startsWith(GROUP_PREFIX)) { + ret.getGroups().add(userName.substring(GROUP_PREFIX.length())); + } else { + ret.getUsers().add(userName); + } + + // revoke removes all permissions + ret.getAccessTypes().add(HbaseAuthUtils.ACCESS_TYPE_READ); + ret.getAccessTypes().add(HbaseAuthUtils.ACCESS_TYPE_WRITE); + ret.getAccessTypes().add(HbaseAuthUtils.ACCESS_TYPE_CREATE); + ret.getAccessTypes().add(HbaseAuthUtils.ACCESS_TYPE_ADMIN); + + return ret; + } +} + + +class RangerHBasePlugin extends RangerBasePlugin { + public RangerHBasePlugin(String appType) { + super("hbase", appType); + } + + public void init() { + super.init(); + } +} + +
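For readers tracing the grant/revoke plumbing above: both createGrantData() and createRevokeData() reduce to translating HBase Permission.Action codes ('R', 'W', 'C', 'A') into Ranger access-type strings before the GrantRevokeRequest is handed to the plugin. Below is a minimal, self-contained sketch of just that mapping; the string constants are illustrative stand-ins, since the patch itself reads them from HbaseAuthUtils.ACCESS_TYPE_*.

import java.util.LinkedHashSet;
import java.util.Set;

public class GrantActionMappingSketch {
    // Illustrative values; the patch uses HbaseAuthUtils.ACCESS_TYPE_* constants.
    static final String READ = "read";
    static final String WRITE = "write";
    static final String CREATE = "create";
    static final String ADMIN = "admin";

    static Set<String> toRangerAccessTypes(String actionCodes) {
        Set<String> accessTypes = new LinkedHashSet<>();
        for (char code : actionCodes.toCharArray()) {
            switch (code) {
                case 'R': accessTypes.add(READ);   break;
                case 'W': accessTypes.add(WRITE);  break;
                case 'C': accessTypes.add(CREATE); break;
                case 'A': accessTypes.add(ADMIN);  break; // grant() also sets delegateAdmin=true for 'A'
                default:  break; // unknown codes are logged and skipped in the patch
            }
        }
        return accessTypes;
    }

    public static void main(String[] args) {
        System.out.println(toRangerAccessTypes("RWA")); // prints [read, write, admin]
    }
}

Note the asymmetry this makes visible: grant() adds only the requested access types, while revoke() always adds all four, because a revoke in this model strips every permission the user holds on the resource.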
http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/ebe83454/hbase-agent/src/test/java/org/apache/hadoop/hbase/security/access/RangerAccessControlListsTest.java ---------------------------------------------------------------------- diff --git a/hbase-agent/src/test/java/org/apache/hadoop/hbase/security/access/RangerAccessControlListsTest.java b/hbase-agent/src/test/java/org/apache/hadoop/hbase/security/access/RangerAccessControlListsTest.java index aa66d08..033d29e 100644 --- a/hbase-agent/src/test/java/org/apache/hadoop/hbase/security/access/RangerAccessControlListsTest.java +++ b/hbase-agent/src/test/java/org/apache/hadoop/hbase/security/access/RangerAccessControlListsTest.java @@ -45,7 +45,7 @@ public class RangerAccessControlListsTest { @After public void tearDown() throws Exception { } - + /* @Test public void testInit() { IOException exceptionFound = null ; @@ -57,5 +57,5 @@ public class RangerAccessControlListsTest { } Assert.assertFalse("Expected to get a NullPointerException after init method execution - Found [" + exceptionFound + "]", (!(exceptionFound != null && exceptionFound.getCause() instanceof NullPointerException))) ; } - + */ } http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/ebe83454/hbase-agent/src/test/java/org/apache/ranger/authorization/hbase/RangerAuthorizationCoprocessorTest.java ---------------------------------------------------------------------- diff --git a/hbase-agent/src/test/java/org/apache/ranger/authorization/hbase/RangerAuthorizationCoprocessorTest.java b/hbase-agent/src/test/java/org/apache/ranger/authorization/hbase/RangerAuthorizationCoprocessorTest.java index 72b86d1..434f9f1 100644 --- a/hbase-agent/src/test/java/org/apache/ranger/authorization/hbase/RangerAuthorizationCoprocessorTest.java +++ b/hbase-agent/src/test/java/org/apache/ranger/authorization/hbase/RangerAuthorizationCoprocessorTest.java @@ -31,7 +31,7 @@ public class RangerAuthorizationCoprocessorTest { @Test public void test_canBeNewed() { - RangerAuthorizationCoprocessor _coprocessor = new RangerAuthorizationCoprocessor(); + RangerAuthorizationCoprocessorImpl _coprocessor = new RangerAuthorizationCoprocessorImpl(); assertNotNull(_coprocessor); } @@ -43,7 +43,7 @@ public class RangerAuthorizationCoprocessorTest { @Test public void test_getColumnFamilies_firewalling() { // passing null collection should return back an empty map - RangerAuthorizationCoprocessor _coprocessor = new RangerAuthorizationCoprocessor(); + RangerAuthorizationCoprocessorImpl _coprocessor = new RangerAuthorizationCoprocessorImpl(); Map<String, Set<String>> result = _coprocessor.getColumnFamilies(null); assertNotNull(result); assertTrue(result.isEmpty()); http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/ebe83454/hdfs-agent/conf/ranger-hdfs-security.xml ---------------------------------------------------------------------- diff --git a/hdfs-agent/conf/ranger-hdfs-security.xml b/hdfs-agent/conf/ranger-hdfs-security.xml index 10409d9..9700a69 100644 --- a/hdfs-agent/conf/ranger-hdfs-security.xml +++ b/hdfs-agent/conf/ranger-hdfs-security.xml @@ -64,6 +64,22 @@ Directory where Ranger policies are cached after successful retrieval from the source </description> </property> + + <property> + <name>ranger.policy.rest.client.connection.timeoutMs</name> + <value>120000</value> + <description> + RangerRestClient Connection Timeout in Milli Seconds + </description> + </property> + + <property> + <name>ranger.policy.rest.client.read.timeoutMs</name> + <value>30000</value> + <description> + 
RangerRestClient read Timeout in Milli Seconds + </description> + </property> <property> <name>ranger.plugin.hdfs.policy.rest.client.connection.timeoutMs</name> @@ -114,4 +130,7 @@ rwxrwxrwx permission on the resource) if Ranger Authorization fails. </description> </property> + + + </configuration> http://git-wip-us.apache.org/repos/asf/incubator-ranger/blob/ebe83454/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java ---------------------------------------------------------------------- diff --git a/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java b/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java deleted file mode 100644 index fa2155c..0000000 --- a/hdfs-agent/src/main/java/org/apache/ranger/authorization/hadoop/RangerHdfsAuthorizer.java +++ /dev/null @@ -1,538 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.apache.ranger.authorization.hadoop; - -import static org.apache.ranger.authorization.hadoop.constants.RangerHadoopConstants.EXECUTE_ACCCESS_TYPE; -import static org.apache.ranger.authorization.hadoop.constants.RangerHadoopConstants.READ_ACCCESS_TYPE; -import static org.apache.ranger.authorization.hadoop.constants.RangerHadoopConstants.WRITE_ACCCESS_TYPE; - -import java.net.InetAddress; -import java.util.HashMap; -import java.util.HashSet; -import java.util.Map; -import java.util.Set; -import java.util.Stack; - -import org.apache.commons.lang.ArrayUtils; -import org.apache.commons.lang.StringUtils; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.fs.permission.FsAction; -import org.apache.hadoop.hdfs.server.namenode.INode; -import org.apache.hadoop.hdfs.server.namenode.INodeAttributeProvider; -import org.apache.hadoop.hdfs.server.namenode.INodeAttributes; -import org.apache.hadoop.hdfs.server.namenode.INodeDirectory; -import org.apache.hadoop.hdfs.util.ReadOnlyList; -import org.apache.hadoop.ipc.Server; -import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.UserGroupInformation; -import org.apache.ranger.audit.model.AuthzAuditEvent; -import org.apache.ranger.authorization.hadoop.config.RangerConfiguration; -import org.apache.ranger.authorization.hadoop.constants.RangerHadoopConstants; -import org.apache.ranger.authorization.hadoop.exceptions.RangerAccessControlException; -import org.apache.ranger.authorization.utils.StringUtil; -import org.apache.ranger.plugin.audit.RangerDefaultAuditHandler; -import org.apache.ranger.plugin.policyengine.RangerAccessRequest; -import org.apache.ranger.plugin.policyengine.RangerAccessRequestImpl; -import 
org.apache.ranger.plugin.policyengine.RangerAccessResource; -import org.apache.ranger.plugin.policyengine.RangerAccessResourceImpl; -import org.apache.ranger.plugin.policyengine.RangerAccessResult; -import org.apache.ranger.plugin.service.RangerBasePlugin; - -import com.google.common.collect.Sets; - -public class RangerHdfsAuthorizer extends INodeAttributeProvider { - private static final Log LOG = LogFactory.getLog(RangerHdfsAuthorizer.class); - - private RangerHdfsPlugin rangerPlugin = null; - private Map<FsAction, Set<String>> access2ActionListMapper = new HashMap<FsAction, Set<String>>(); - - public RangerHdfsAuthorizer() { - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerHdfsAuthorizer.RangerHdfsAuthorizer()"); - } - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerHdfsAuthorizer.RangerHdfsAuthorizer()"); - } - } - - public void start() { - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerHdfsAuthorizer.start()"); - } - - RangerHdfsPlugin plugin = new RangerHdfsPlugin(); - plugin.init(); - - access2ActionListMapper.put(FsAction.NONE, new HashSet<String>()); - access2ActionListMapper.put(FsAction.ALL, Sets.newHashSet(READ_ACCCESS_TYPE, WRITE_ACCCESS_TYPE, EXECUTE_ACCCESS_TYPE)); - access2ActionListMapper.put(FsAction.READ, Sets.newHashSet(READ_ACCCESS_TYPE)); - access2ActionListMapper.put(FsAction.READ_WRITE, Sets.newHashSet(READ_ACCCESS_TYPE, WRITE_ACCCESS_TYPE)); - access2ActionListMapper.put(FsAction.READ_EXECUTE, Sets.newHashSet(READ_ACCCESS_TYPE, EXECUTE_ACCCESS_TYPE)); - access2ActionListMapper.put(FsAction.WRITE, Sets.newHashSet(WRITE_ACCCESS_TYPE)); - access2ActionListMapper.put(FsAction.WRITE_EXECUTE, Sets.newHashSet(WRITE_ACCCESS_TYPE, EXECUTE_ACCCESS_TYPE)); - access2ActionListMapper.put(FsAction.EXECUTE, Sets.newHashSet(EXECUTE_ACCCESS_TYPE)); - - rangerPlugin = plugin; - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerHdfsAuthorizer.start()"); - } - } - - public void stop() { - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerHdfsAuthorizer.stop()"); - } - - RangerHdfsPlugin plugin = rangerPlugin; - rangerPlugin = null; - - if(plugin != null) { - plugin.cleanup(); - } - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerHdfsAuthorizer.stop()"); - } - } - - @Override - public INodeAttributes getAttributes(String fullPath, INodeAttributes inode) { - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerHdfsAuthorizer.getAttributes(" + fullPath + ")"); - } - - INodeAttributes ret = inode; // return default attributes - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerHdfsAuthorizer.getAttributes(" + fullPath + "): " + ret); - } - - return ret; - } - - @Override - public INodeAttributes getAttributes(String[] pathElements, INodeAttributes inode) { - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerHdfsAuthorizer.getAttributes(pathElementsCount=" + (pathElements == null ? 0 : pathElements.length) + ")"); - } - - INodeAttributes ret = inode; // return default attributes - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerHdfsAuthorizer.getAttributes(pathElementsCount=" + (pathElements == null ? 
0 : pathElements.length) + "): " + ret); - } - - return ret; - } - - @Override - public AccessControlEnforcer getExternalAccessControlEnforcer(AccessControlEnforcer defaultEnforcer) { - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerHdfsAuthorizer.getExternalAccessControlEnforcer()"); - } - - RangerAccessControlEnforcer rangerAce = new RangerAccessControlEnforcer(defaultEnforcer); - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerHdfsAuthorizer.getExternalAccessControlEnforcer()"); - } - - return rangerAce; - } - - - class RangerAccessControlEnforcer implements AccessControlEnforcer { - private INodeAttributeProvider.AccessControlEnforcer defaultEnforcer = null; - - public RangerAccessControlEnforcer(AccessControlEnforcer defaultEnforcer) { - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerAccessControlEnforcer.RangerAccessControlEnforcer()"); - } - - this.defaultEnforcer = defaultEnforcer; - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerAccessControlEnforcer.RangerAccessControlEnforcer()"); - } - } - - @Override - public void checkPermission(String fsOwner, String superGroup, UserGroupInformation ugi, - INodeAttributes[] inodeAttrs, INode[] inodes, byte[][] pathByNameArr, - int snapshotId, String path, int ancestorIndex, boolean doCheckOwner, - FsAction ancestorAccess, FsAction parentAccess, FsAction access, - FsAction subAccess, boolean ignoreEmptyDir) throws AccessControlException { - boolean accessGranted = false; - RangerHdfsPlugin plugin = rangerPlugin; - RangerHdfsAuditHandler auditHandler = null; - String user = ugi != null ? ugi.getShortUserName() : null; - Set<String> groups = ugi != null ? Sets.newHashSet(ugi.getGroupNames()) : null; - - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerAccessControlEnforcer.checkPermission(" - + "fsOwner=" + fsOwner + "; superGroup=" + superGroup + ", inodesCount=" + (inodes != null ? inodes.length : 0) - + ", snapshotId=" + snapshotId + ", user=" + user + ", path=" + path + ", ancestorIndex=" + ancestorIndex - + ", doCheckOwner="+ doCheckOwner + ", ancestorAccess=" + ancestorAccess + ", parentAccess=" + parentAccess - + ", access=" + access + ", subAccess=" + subAccess + ", ignoreEmptyDir=" + ignoreEmptyDir + ")"); - } - - try { - if(plugin != null && !ArrayUtils.isEmpty(inodes)) { - auditHandler = new RangerHdfsAuditHandler(path); - - if(ancestorIndex >= inodes.length) { - ancestorIndex = inodes.length - 1; - } - - for(; ancestorIndex >= 0 && inodes[ancestorIndex] == null; ancestorIndex--); - - accessGranted = true; - - INode ancestor = inodes.length > ancestorIndex && ancestorIndex >= 0 ? inodes[ancestorIndex] : null; - INode parent = inodes.length > 1 ? inodes[inodes.length - 2] : null; - INode inode = inodes[inodes.length - 1]; - - boolean noAccessToCheck = access == null && parentAccess == null && ancestorAccess == null && subAccess == null; - - if(noAccessToCheck) { // check for traverse (EXECUTE) access on the path (if path is a directory) or its parent (if path is a file) - INode node = null; - INodeAttributes nodeAttribs = null; - - if(inode != null && inode.isDirectory()) { - node = inode; - nodeAttribs = inodeAttrs.length > 0 ? inodeAttrs[inodeAttrs.length - 1] : null; - } else if(parent != null) { - node = parent; - nodeAttribs = inodeAttrs.length > 1 ? 
inodeAttrs[inodeAttrs.length - 2] : null; - } - - if(node != null) { - accessGranted = isAccessAllowed(node, nodeAttribs, FsAction.EXECUTE, user, groups, fsOwner, superGroup, plugin, null); - } - } - - // checkStickyBit - if (accessGranted && parentAccess != null && parentAccess.implies(FsAction.WRITE) && parent != null && inode != null) { - if (parent.getFsPermission() != null && parent.getFsPermission().getStickyBit()) { - // user should be owner of the parent or the inode - accessGranted = StringUtils.equals(parent.getUserName(), user) || StringUtils.equals(inode.getUserName(), user); - } - } - - // checkAncestorAccess - if(accessGranted && ancestorAccess != null && ancestor != null) { - INodeAttributes ancestorAttribs = inodeAttrs.length > ancestorIndex ? inodeAttrs[ancestorIndex] : null; - - accessGranted = isAccessAllowed(ancestor, ancestorAttribs, ancestorAccess, user, groups, fsOwner, superGroup, plugin, auditHandler); - } - - // checkParentAccess - if(accessGranted && parentAccess != null && parent != null) { - INodeAttributes parentAttribs = inodeAttrs.length > 1 ? inodeAttrs[inodeAttrs.length - 2] : null; - - accessGranted = isAccessAllowed(parent, parentAttribs, parentAccess, user, groups, fsOwner, superGroup, plugin, auditHandler); - } - - // checkINodeAccess - if(accessGranted && access != null && inode != null) { - INodeAttributes inodeAttribs = inodeAttrs.length > 0 ? inodeAttrs[inodeAttrs.length - 1] : null; - - accessGranted = isAccessAllowed(inode, inodeAttribs, access, user, groups, fsOwner, superGroup, plugin, auditHandler); - } - - // checkSubAccess - if(accessGranted && subAccess != null && inode != null && inode.isDirectory()) { - Stack<INodeDirectory> directories = new Stack<INodeDirectory>(); - - for(directories.push(inode.asDirectory()); !directories.isEmpty(); ) { - INodeDirectory dir = directories.pop(); - ReadOnlyList<INode> cList = dir.getChildrenList(snapshotId); - - if (!(cList.isEmpty() && ignoreEmptyDir)) { - INodeAttributes dirAttribs = dir.getSnapshotINode(snapshotId); - - accessGranted = isAccessAllowed(dir, dirAttribs, subAccess, user, groups, fsOwner, superGroup, plugin, auditHandler); - - if(! accessGranted) { - break; - } - } - - for(INode child : cList) { - if (child.isDirectory()) { - directories.push(child.asDirectory()); - } - } - } - } - - // checkOwnerAccess - if(accessGranted && doCheckOwner) { - INodeAttributes inodeAttribs = inodeAttrs.length > 0 ? inodeAttrs[inodeAttrs.length - 1] : null; - String owner = inodeAttribs != null ? inodeAttribs.getUserName() : null; - - accessGranted = StringUtils.equals(user, owner); - } - } - - if(! accessGranted && RangerHdfsPlugin.isHadoopAuthEnabled() && defaultEnforcer != null) { - try { - defaultEnforcer.checkPermission(fsOwner, superGroup, ugi, inodeAttrs, inodes, - pathByNameArr, snapshotId, path, ancestorIndex, doCheckOwner, - ancestorAccess, parentAccess, access, subAccess, ignoreEmptyDir); - - accessGranted = true; - } finally { - if(auditHandler != null) { - FsAction action = access; - - if(action == null) { - if(parentAccess != null) { - action = parentAccess; - } else if(ancestorAccess != null) { - action = ancestorAccess; - } else if(subAccess != null) { - action = subAccess; - } else { - action = FsAction.NONE; - } - } - - auditHandler.logHadoopEvent(path, action, accessGranted); - } - } - } - - if(! 
accessGranted) { - throw new RangerAccessControlException("Permission denied: principal{user=" + user + ",groups: " + groups + "}, access=" + access + ", " + path) ; - } - } finally { - if(auditHandler != null) { - auditHandler.flushAudit(); - } - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerAccessControlEnforcer.checkPermission(" + path + ", " + access + ", user=" + user + ") : " + accessGranted); - } - } - } - - private boolean isAccessAllowed(INode inode, INodeAttributes inodeAttribs, FsAction access, String user, Set<String> groups, String fsOwner, String superGroup, RangerHdfsPlugin plugin, RangerHdfsAuditHandler auditHandler) { - boolean ret = false; - String path = inode != null ? inode.getFullPathName() : null; - String pathOwner = inodeAttribs != null ? inodeAttribs.getUserName() : null; - - if(pathOwner == null && inode != null) { - pathOwner = inode.getUserName(); - } - - if (RangerHadoopConstants.HDFS_ROOT_FOLDER_PATH_ALT.equals(path)) { - path = RangerHadoopConstants.HDFS_ROOT_FOLDER_PATH; - } - - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerAccessControlEnforcer.isAccessAllowed(" + path + ", " + access + ", " + user + ")"); - } - - Set<String> accessTypes = access2ActionListMapper.get(access); - - if(accessTypes == null) { - LOG.warn("RangerAccessControlEnforcer.isAccessAllowed(" + path + ", " + access + ", " + user + "): no Ranger accessType found for " + access); - - accessTypes = access2ActionListMapper.get(FsAction.NONE); - } - - for(String accessType : accessTypes) { - RangerHdfsAccessRequest request = new RangerHdfsAccessRequest(path, pathOwner, access, accessType, user, groups); - - RangerAccessResult result = plugin.isAccessAllowed(request, auditHandler); - - if (result == null) { - LOG.error("RangerAccessControlEnforcer: Internal error: null RangerAccessResult object received back from isAccessAllowed()!"); - } else { - ret = result.getIsAllowed(); - - if (!ret) { - break; - } - } - } - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerAccessControlEnforcer.isAccessAllowed(" + path + ", " + access + ", " + user + "): " + ret); - } - - return ret; - } - } -} - - -class RangerHdfsPlugin extends RangerBasePlugin { - private static boolean hadoopAuthEnabled = RangerHadoopConstants.RANGER_ADD_HDFS_PERMISSION_DEFAULT; - - public RangerHdfsPlugin() { - super("hdfs", "hdfs"); - } - - public void init() { - super.init(); - - RangerHdfsPlugin.hadoopAuthEnabled = RangerConfiguration.getInstance().getBoolean(RangerHadoopConstants.RANGER_ADD_HDFS_PERMISSION_PROP, RangerHadoopConstants.RANGER_ADD_HDFS_PERMISSION_DEFAULT); - } - - public static boolean isHadoopAuthEnabled() { - return RangerHdfsPlugin.hadoopAuthEnabled; - } -} - -class RangerHdfsResource extends RangerAccessResourceImpl { - private static final String KEY_PATH = "path"; - - - public RangerHdfsResource(String path, String owner) { - super.setValue(KEY_PATH, path); - super.setOwnerUser(owner); - } -} - -class RangerHdfsAccessRequest extends RangerAccessRequestImpl { - public RangerHdfsAccessRequest(String path, String pathOwner, FsAction access, String accessType, String user, Set<String> groups) { - super.setResource(new RangerHdfsResource(path, pathOwner)); - super.setAccessType(accessType); - super.setUser(user); - super.setUserGroups(groups); - super.setAccessTime(StringUtil.getUTCDate()); - super.setClientIPAddress(getRemoteIp()); - super.setAction(access.toString()); - } - - private static String getRemoteIp() { - String ret = null ; - InetAddress ip = Server.getRemoteIp() ; - if (ip != null) { - 
ret = ip.getHostAddress(); - } - return ret ; - } -} - -class RangerHdfsAuditHandler extends RangerDefaultAuditHandler { - private static final Log LOG = LogFactory.getLog(RangerHdfsAuditHandler.class); - - private boolean isAuditEnabled = false; - private AuthzAuditEvent auditEvent = null; - - private static final String RangerModuleName = RangerConfiguration.getInstance().get(RangerHadoopConstants.AUDITLOG_RANGER_MODULE_ACL_NAME_PROP , RangerHadoopConstants.DEFAULT_RANGER_MODULE_ACL_NAME) ; - private static final String HadoopModuleName = RangerConfiguration.getInstance().get(RangerHadoopConstants.AUDITLOG_HADOOP_MODULE_ACL_NAME_PROP , RangerHadoopConstants.DEFAULT_HADOOP_MODULE_ACL_NAME) ; - private static final String excludeUserList = RangerConfiguration.getInstance().get(RangerHadoopConstants.AUDITLOG_HDFS_EXCLUDE_LIST_PROP, RangerHadoopConstants.AUDITLOG_EMPTY_STRING) ; - private static HashSet<String> excludeUsers = null ; - - static { - if (excludeUserList != null && excludeUserList.trim().length() > 0) { - excludeUsers = new HashSet<String>() ; - for(String excludeUser : excludeUserList.trim().split(",")) { - excludeUser = excludeUser.trim() ; - if (LOG.isDebugEnabled()) { - LOG.debug("Adding exclude user [" + excludeUser + "]"); - } - excludeUsers.add(excludeUser) ; - } - } - } - - public RangerHdfsAuditHandler(String pathToBeValidated) { - auditEvent = new AuthzAuditEvent(); - auditEvent.setResourcePath(pathToBeValidated); - } - - @Override - public void processResult(RangerAccessResult result) { - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerHdfsAuditHandler.logAudit(" + result + ")"); - } - - if(! isAuditEnabled && result.getIsAudited()) { - isAuditEnabled = true; - } - - RangerAccessRequest request = result.getAccessRequest(); -// RangerServiceDef serviceDef = result.getServiceDef(); - RangerAccessResource resource = request.getResource(); - String resourceType = resource != null ? resource.getLeafName() : null; - String resourcePath = resource != null ? resource.getAsString() : null; - - auditEvent.setUser(request.getUser()); - auditEvent.setResourceType(resourceType) ; - auditEvent.setAccessType(request.getAction()); - auditEvent.setAccessResult((short)(result.getIsAllowed() ? 1 : 0)); - auditEvent.setClientIP(request.getClientIPAddress()); - auditEvent.setEventTime(request.getAccessTime()); - auditEvent.setAclEnforcer(RangerModuleName); - auditEvent.setPolicyId(result.getPolicyId()); - auditEvent.setRepositoryType(result.getServiceType()); - auditEvent.setRepositoryName(result.getServiceName()); - auditEvent.setResultReason(resourcePath); - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerHdfsAuditHandler.logAudit(" + result + "): " + auditEvent); - } - } - - public void logHadoopEvent(String path, FsAction action, boolean accessGranted) { - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerHdfsAuditHandler.logHadoopEvent(" + path + ", " + action + ", " + accessGranted + ")"); - } - - auditEvent.setResultReason(path); - auditEvent.setAccessResult((short) (accessGranted ? 1 : 0)); - auditEvent.setAccessType(action == null ? 
null : action.toString()); - auditEvent.setAclEnforcer(HadoopModuleName); - auditEvent.setPolicyId(-1); - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerHdfsAuditHandler.logHadoopEvent(" + path + ", " + action + ", " + accessGranted + "): " + auditEvent); - } - } - - public void flushAudit() { - if(LOG.isDebugEnabled()) { - LOG.debug("==> RangerHdfsAuditHandler.flushAudit(" + isAuditEnabled + ", " + auditEvent + ")"); - } - - if(isAuditEnabled && !StringUtils.isEmpty(auditEvent.getAccessType())) { - String username = auditEvent.getUser(); - - boolean skipLog = (username != null && excludeUsers != null && excludeUsers.contains(username)) ; - - if (! skipLog) { - super.logAuthzAudit(auditEvent); - } - } - - if(LOG.isDebugEnabled()) { - LOG.debug("<== RangerHdfsAuditHandler.flushAudit(" + isAuditEnabled + ", " + auditEvent + ")"); - } - } -} -
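One part of the deleted RangerAccessControlEnforcer above that is easy to misread is the subAccess check: an iterative, stack-based walk over the directory subtree that stops at the first directory the plugin denies, optionally skipping empty directories. A simplified stand-in follows; Dir replaces HDFS's INodeDirectory, the Predicate replaces the plugin's isAccessAllowed() call, and in the real code only directory children are pushed onto the stack.

import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Collections;
import java.util.Deque;
import java.util.List;
import java.util.function.Predicate;

public class SubAccessSketch {
    // Stand-in for INodeDirectory; children holds sub-directories only.
    static class Dir {
        final String path;
        final List<Dir> children;
        Dir(String path, List<Dir> children) { this.path = path; this.children = children; }
    }

    static boolean isSubAccessAllowed(Dir root, Predicate<Dir> allowed, boolean ignoreEmptyDir) {
        Deque<Dir> stack = new ArrayDeque<>();
        stack.push(root);
        while (!stack.isEmpty()) {
            Dir dir = stack.pop();
            // Matches the patch: an empty directory is skipped when ignoreEmptyDir is set.
            if (!(dir.children.isEmpty() && ignoreEmptyDir) && !allowed.test(dir)) {
                return false; // first denial short-circuits the whole subtree check
            }
            for (Dir child : dir.children) {
                stack.push(child);
            }
        }
        return true;
    }

    public static void main(String[] args) {
        Dir leaf = new Dir("/a/b", Collections.<Dir>emptyList());
        Dir root = new Dir("/a", Arrays.asList(leaf));
        System.out.println(isSubAccessAllowed(root, d -> !d.path.equals("/a/b"), true));  // true: empty /a/b is skipped
        System.out.println(isSubAccessAllowed(root, d -> !d.path.equals("/a/b"), false)); // false: /a/b is denied
    }
}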

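Similarly, the deleted RangerHdfsAuditHandler resolves its audit-exclusion list once, in a static initializer, from a comma-separated property, then drops matching users' events at flush time. A stripped-down sketch of just that behavior, with the RangerConfiguration plumbing omitted:

import java.util.HashSet;
import java.util.Set;

public class AuditExcludeSketch {
    // Parsed once, like the static initializer in RangerHdfsAuditHandler.
    static Set<String> parseExcludeUsers(String excludeUserList) {
        Set<String> excludeUsers = new HashSet<String>();
        if (excludeUserList != null && excludeUserList.trim().length() > 0) {
            for (String user : excludeUserList.trim().split(",")) {
                excludeUsers.add(user.trim());
            }
        }
        return excludeUsers;
    }

    // Mirrors the skipLog decision in flushAudit(): excluded users are never audited.
    static boolean shouldLog(String username, Set<String> excludeUsers) {
        return username == null || !excludeUsers.contains(username);
    }

    public static void main(String[] args) {
        Set<String> excluded = parseExcludeUsers(" hdfs , hbase ");
        System.out.println(shouldLog("hdfs", excluded));  // false: event is skipped
        System.out.println(shouldLog("alice", excluded)); // true: event is logged
    }
}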