Repository: phoenix
Updated Branches:
  refs/heads/system-catalog ae359fec1 -> 15d4dace4


http://git-wip-us.apache.org/repos/asf/phoenix/blob/217867c7/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
new file mode 100644
index 0000000..8437b37
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixAccessController.java
@@ -0,0 +1,628 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor;
+
+import java.io.IOException;
+import java.net.InetAddress;
+import java.security.PrivilegedExceptionAction;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.ClusterConnection;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.coprocessor.BaseMasterAndRegionObserver;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
+import org.apache.hadoop.hbase.ipc.RpcServer;
+import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos;
+import org.apache.hadoop.hbase.protobuf.generated.AccessControlProtos.AccessControlService;
+import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
+import org.apache.hadoop.hbase.security.AccessDeniedException;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.access.AccessControlClient;
+import org.apache.hadoop.hbase.security.access.AuthResult;
+import org.apache.hadoop.hbase.security.access.Permission;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import org.apache.hadoop.hbase.security.access.UserPermission;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+import org.apache.phoenix.util.MetaDataUtil;
+
+import com.google.common.collect.Lists;
+import com.google.protobuf.RpcCallback;
+
+public class PhoenixAccessController extends BaseMetaDataEndpointObserver {
+
+    private PhoenixMetaDataControllerEnvironment env;
+    private volatile ArrayList<BaseMasterAndRegionObserver> accessControllers;
+    private boolean accessCheckEnabled;
+    private UserProvider userProvider;
+    private boolean isAutomaticGrantEnabled;
+    private boolean isStrictMode;
+    public static final Log LOG = LogFactory.getLog(PhoenixAccessController.class);
+    private static final Log AUDITLOG =
+            LogFactory.getLog("SecurityLogger." + PhoenixAccessController.class.getName());
+
+    // Lazily discover the co-located access controllers once; build the list locally and
+    // publish it through the volatile field so the double-checked locking is safe.
+    private List<BaseMasterAndRegionObserver> getAccessControllers() throws IOException {
+        if (accessControllers == null) {
+            synchronized (this) {
+                if (accessControllers == null) {
+                    ArrayList<BaseMasterAndRegionObserver> controllers = new ArrayList<BaseMasterAndRegionObserver>();
+                    RegionCoprocessorHost cpHost = this.env.getCoprocessorHost();
+                    List<BaseMasterAndRegionObserver> coprocessors = cpHost
+                            .findCoprocessors(BaseMasterAndRegionObserver.class);
+                    for (BaseMasterAndRegionObserver cp : coprocessors) {
+                        if (cp instanceof AccessControlService.Interface) {
+                            controllers.add(cp);
+                        }
+                    }
+                    accessControllers = controllers;
+                }
+            }
+        }
+        return accessControllers;
+    }
+
+    @Override
+    public void preGetTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
+            String tableName, TableName physicalTableName) throws IOException {
+        for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
+            observer.preGetTableDescriptors(new ObserverContext<MasterCoprocessorEnvironment>(),
+                    Lists.newArrayList(physicalTableName), Collections.<HTableDescriptor> emptyList());
+        }
+    }
+
+    @Override
+    public void start(CoprocessorEnvironment env) throws IOException {
+        Configuration conf = env.getConfiguration();
+        this.accessCheckEnabled = conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
+                QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
+        this.isAutomaticGrantEnabled = conf.getBoolean(QueryServices.PHOENIX_AUTOMATIC_GRANT_ENABLED,
+                QueryServicesOptions.DEFAULT_PHOENIX_AUTOMATIC_GRANT_ENABLED);
+        if (!this.accessCheckEnabled) {
+            LOG.warn("PhoenixAccessController has been loaded with authorization checks disabled.");
+        }
+        if (env instanceof PhoenixMetaDataControllerEnvironment) {
+            this.env = (PhoenixMetaDataControllerEnvironment)env;
+        } else {
+            throw new IllegalArgumentException(
+                    "Not a valid environment, should be loaded by PhoenixMetaDataControllerEnvironment");
+        }
+        // set the user-provider.
+        this.userProvider = UserProvider.instantiate(env.getConfiguration());
+        this.isStrictMode = conf.getBoolean(QueryServices.PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED,
+                QueryServicesOptions.DEFAULT_PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED);
+        // init superusers and add the server principal (if using security)
+        // or process owner as default super user.
+        Superusers.initialize(env.getConfiguration());
+    }
+
+    @Override
+    public void stop(CoprocessorEnvironment env) throws IOException {}
+
+    @Override
+    public void preCreateTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
+            String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType,
+            Set<byte[]> familySet, Set<TableName> indexes) throws IOException {
+        if (!accessCheckEnabled) { return; }
+
+        if (tableType != PTableType.VIEW) {
+            final HTableDescriptor htd = new HTableDescriptor(physicalTableName);
+            for (byte[] familyName : familySet) {
+                htd.addFamily(new HColumnDescriptor(familyName));
+            }
+            for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
+                observer.preCreateTable(new ObserverContext<MasterCoprocessorEnvironment>(), htd, null);
+            }
+        }
+
+        // Indexes and views require READ access on the parent physical table.
+        Set<TableName> physicalTablesChecked = new HashSet<TableName>();
+        if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
+            physicalTablesChecked.add(parentPhysicalTableName);
+            requireAccess("Create" + tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
+        }
+
+        if (tableType == PTableType.VIEW) {
+
+            Action[] requiredActions = { Action.READ, Action.EXEC };
+            for (TableName index : indexes) {
+                if (!physicalTablesChecked.add(index)) {
+                    // Skip the check for a local index, since its ACLs were already checked above,
+                    // and avoid re-checking the same physical table (e.g. a shared view index table).
+                    continue;
+                }
+
+                User user = getActiveUser();
+                List<UserPermission> permissionForUser = getPermissionForUser(
+                        getUserPermissions(index.getNameAsString()), Bytes.toBytes(user.getShortName()));
+                Set<Action> requireAccess = new HashSet<>();
+                Set<Action> accessExists = new HashSet<>();
+                if (permissionForUser != null) {
+                    for (UserPermission userPermission : permissionForUser) {
+                        for (Action action : Arrays.asList(requiredActions)) {
+                            if (!userPermission.implies(action)) {
+                                requireAccess.add(action);
+                            }
+                        }
+                    }
+                    if (!requireAccess.isEmpty()) {
+                        for (UserPermission userPermission : permissionForUser) {
+                            accessExists.addAll(Arrays.asList(userPermission.getActions()));
+                        }
+                    }
+                } else {
+                    requireAccess.addAll(Arrays.asList(requiredActions));
+                }
+                if (!requireAccess.isEmpty()) {
+                    byte[] indexPhysicalTable = index.getName();
+                    handleRequireAccessOnDependentTable("Create" + tableType, user.getName(),
+                            TableName.valueOf(indexPhysicalTable), tableName, requireAccess, accessExists);
+                }
+            }
+
+        }
+
+        if (tableType == PTableType.INDEX) {
+            // All users who have READ access on the data table should get access to the index
+            // table as well: WRITE is needed for the index updates done by users who have WRITE
+            // access on the data table, and CREATE is needed when the table is dropped.
+            // This is done so that existing users querying the data table do not see access
+            // denied errors for newly created indexes.
+            // TODO: confirm whether granting permissions from a coprocessor is a security leak
+            // (currently it is only done if automatic grant is explicitly enabled in the configuration).
+            // Skip the check for local indexes.
+            if (physicalTableName != null && !parentPhysicalTableName.equals(physicalTableName)
+                    && !MetaDataUtil.isViewIndex(physicalTableName.getNameAsString())) {
+                authorizeOrGrantAccessToUsers("Create" + tableType, parentPhysicalTableName,
+                        Arrays.asList(Action.READ, Action.WRITE, Action.CREATE, Action.EXEC, Action.ADMIN),
+                        physicalTableName);
+            }
+        }
+    }
+
+    
+    public void handleRequireAccessOnDependentTable(String request, String userName, TableName dependentTable,
+            String requestTable, Set<Action> requireAccess, Set<Action> accessExists) throws IOException {
+
+        if (!isStrictMode) {
+            AUDITLOG.warn("Strict mode is not enabled, so " + request + " is allowed but User:" + userName
+                    + " will not have the following access " + requireAccess
+                    + " to the existing dependent physical table " + dependentTable);
+            return;
+        }
+        if (isAutomaticGrantEnabled) {
+            Set<Action> unionSet = new HashSet<Action>();
+            unionSet.addAll(requireAccess);
+            unionSet.addAll(accessExists);
+            AUDITLOG.info(request + ": automatically granting access to index table during creation of view:"
+                    + requestTable + authString(userName, dependentTable, requireAccess));
+            grantPermissions(userName, dependentTable.getName(), unionSet.toArray(new Action[0]));
+        } else {
+            throw new AccessDeniedException("Insufficient permissions for users of dependent table"
+                    + authString(userName, dependentTable, requireAccess));
+        }
+    }
+    
+    private void grantPermissions(final String toUser, final byte[] table, final Action... actions) throws IOException {
+        User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+                try (Connection conn = ConnectionFactory.createConnection(env.getConfiguration())) {
+                    AccessControlClient.grant(conn, TableName.valueOf(table), toUser, null, null,
+                            actions);
+                } catch (Throwable e) {
+                    // The exception must actually be thrown; otherwise a failed grant is silently swallowed.
+                    throw new DoNotRetryIOException(e);
+                }
+                return null;
+            }
+        });
+    }
+
+    private void authorizeOrGrantAccessToUsers(final String request, final TableName fromTable,
+            final List<Action> requiredActionsOnTable, final TableName toTable)
+            throws IOException {
+        User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws IOException {
+                try (Connection conn = ConnectionFactory.createConnection(env.getConfiguration())) {
+                    List<UserPermission> userPermissions = getUserPermissions(fromTable.getNameAsString());
+                    List<UserPermission> permissionsOnTheTable = getUserPermissions(toTable.getNameAsString());
+                    if (userPermissions != null) {
+                        for (UserPermission userPermission : userPermissions) {
+                            Set<Action> requireAccess = new HashSet<Action>();
+                            Set<Action> accessExists = new HashSet<Action>();
+                            List<UserPermission> permsToTable = getPermissionForUser(permissionsOnTheTable,
+                                    userPermission.getUser());
+                            for (Action action : requiredActionsOnTable) {
+                                boolean haveAccess = false;
+                                if (userPermission.implies(action)) {
+                                    if (permsToTable == null) {
+                                        requireAccess.add(action);
+                                    } else {
+                                        for (UserPermission permToTable : permsToTable) {
+                                            if (permToTable.implies(action)) {
+                                                haveAccess = true;
+                                            }
+                                        }
+                                        if (!haveAccess) {
+                                            requireAccess.add(action);
+                                        }
+                                    }
+                                }
+                            }
+                            if (permsToTable != null) {
+                                // Append to the access the user already has
+                                for (UserPermission permToTable : permsToTable) {
+                                    accessExists.addAll(Arrays.asList(permToTable.getActions()));
+                                }
+                            }
+                            if (!requireAccess.isEmpty()) {
+                                if (AuthUtil.isGroupPrincipal(Bytes.toString(userPermission.getUser()))) {
+                                    AUDITLOG.warn("Users of GROUP:" + Bytes.toString(userPermission.getUser())
+                                            + " will not have the following access " + requireAccess
+                                            + " to the newly created index " + toTable
+                                            + ", automatic grant is not yet allowed on groups");
+                                    continue;
+                                }
+                                handleRequireAccessOnDependentTable(request, Bytes.toString(userPermission.getUser()),
+                                        toTable, toTable.getNameAsString(), requireAccess, accessExists);
+                            }
+                        }
+                    }
+                }
+                return null;
+            }
+        });
+    }
+
+    private List<UserPermission> getPermissionForUser(List<UserPermission> perms, byte[] user) {
+        if (perms != null) {
+            // Collect all permissions for the user, as multiple AccessControl coprocessor
+            // implementations can each report permissions for the same user.
+            List<UserPermission> permissions = new ArrayList<>();
+            for (UserPermission p : perms) {
+                if (Bytes.equals(p.getUser(), user)) {
+                    permissions.add(p);
+                }
+            }
+            if (!permissions.isEmpty()) {
+                return permissions;
+            }
+        }
+        return null;
+    }
+
+    @Override
+    public void preDropTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
+            String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType,
+            List<PTable> indexes) throws IOException {
+        if (!accessCheckEnabled) { return; }
+
+        for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
+            if (tableType != PTableType.VIEW) {
+                observer.preDeleteTable(new ObserverContext<MasterCoprocessorEnvironment>(), physicalTableName);
+            }
+            if (indexes != null) {
+                for (PTable index : indexes) {
+                    observer.preDeleteTable(new ObserverContext<MasterCoprocessorEnvironment>(),
+                            TableName.valueOf(index.getPhysicalName().getBytes()));
+                }
+            }
+        }
+        // Check the same permissions that were checked when the view was created.
+        if (tableType == PTableType.VIEW || tableType == PTableType.INDEX) {
+            requireAccess("Drop " + tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
+        }
+    }
+
+    @Override
+    public void preAlterTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
+            String tableName, TableName physicalTableName, TableName parentPhysicalTableName, PTableType tableType) throws IOException {
+        if (!accessCheckEnabled) { return; }
+        for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
+            if (tableType != PTableType.VIEW) {
+                observer.preModifyTable(new ObserverContext<MasterCoprocessorEnvironment>(), physicalTableName,
+                        new HTableDescriptor(physicalTableName));
+            }
+        }
+        if (tableType == PTableType.VIEW) {
+            requireAccess("Alter " + tableType, parentPhysicalTableName, Action.READ, Action.EXEC);
+        }
+    }
+
+    @Override
+    public void preGetSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
+            throws IOException {
+        if (!accessCheckEnabled) { return; }
+        for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
+            observer.preListNamespaceDescriptors(new ObserverContext<MasterCoprocessorEnvironment>(),
+                    Arrays.asList(NamespaceDescriptor.create(schemaName).build()));
+        }
+    }
+
+    @Override
+    public void preCreateSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
+            throws IOException {
+        if (!accessCheckEnabled) { return; }
+        for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
+            observer.preCreateNamespace(new ObserverContext<MasterCoprocessorEnvironment>(),
+                    NamespaceDescriptor.create(schemaName).build());
+        }
+    }
+
+    @Override
+    public void preDropSchema(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String schemaName)
+            throws IOException {
+        if (!accessCheckEnabled) { return; }
+        for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
+            observer.preDeleteNamespace(new ObserverContext<MasterCoprocessorEnvironment>(), schemaName);
+        }
+    }
+
+    @Override
+    public void preIndexUpdate(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx, String tenantId,
+            String indexName, TableName physicalTableName, TableName parentPhysicalTableName, PIndexState newState)
+            throws IOException {
+        if (!accessCheckEnabled) { return; }
+        for (BaseMasterAndRegionObserver observer : getAccessControllers()) {
+            observer.preModifyTable(new ObserverContext<MasterCoprocessorEnvironment>(), physicalTableName,
+                    new HTableDescriptor(physicalTableName));
+        }
+        // Check for read access in case of rebuild
+        if (newState == PIndexState.BUILDING) {
+            requireAccess("Rebuild:", parentPhysicalTableName, Action.READ, Action.EXEC);
+        }
+    }
+
+    private List<UserPermission> getUserPermissions(final String tableName) throws IOException {
+        return User.runAsLoginUser(new PrivilegedExceptionAction<List<UserPermission>>() {
+            @Override
+            public List<UserPermission> run() throws Exception {
+                final List<UserPermission> userPermissions = new ArrayList<UserPermission>();
+                try (Connection connection = ConnectionFactory.createConnection(env.getConfiguration())) {
+                    // Collect permissions from every registered access controller: the stock HBase
+                    // AccessController is queried through the client API, any other implementation
+                    // through its AccessControlService protobuf endpoint.
+                    for (BaseMasterAndRegionObserver service : getAccessControllers()) {
+                        if (service.getClass().getName().equals(
+                                org.apache.hadoop.hbase.security.access.AccessController.class.getName())) {
+                            userPermissions.addAll(AccessControlClient.getUserPermissions(connection, tableName));
+                        } else {
+                            AccessControlProtos.GetUserPermissionsRequest.Builder builder =
+                                    AccessControlProtos.GetUserPermissionsRequest.newBuilder();
+                            builder.setTableName(ProtobufUtil.toProtoTableName(TableName.valueOf(tableName)));
+                            builder.setType(AccessControlProtos.Permission.Type.Table);
+                            AccessControlProtos.GetUserPermissionsRequest request = builder.build();
+
+                            PayloadCarryingRpcController controller = ((ClusterConnection)connection)
+                                    .getRpcControllerFactory().newController();
+                            ((AccessControlService.Interface)service).getUserPermissions(controller, request,
+                                    new RpcCallback<AccessControlProtos.GetUserPermissionsResponse>() {
+                                        @Override
+                                        public void run(AccessControlProtos.GetUserPermissionsResponse message) {
+                                            if (message != null) {
+                                                for (AccessControlProtos.UserPermission perm : message
+                                                        .getUserPermissionList()) {
+                                                    userPermissions.add(ProtobufUtil.toUserPermission(perm));
+                                                }
+                                            }
+                                        }
+                                    });
+                        }
+                    }
+                } catch (Throwable e) {
+                    if (e instanceof Exception) {
+                        throw (Exception) e;
+                    } else if (e instanceof Error) {
+                        throw (Error) e;
+                    }
+                    throw new Exception(e);
+                }
+                return userPermissions;
+            }
+        });
+    }
+    
+    /**
+     * Authorizes that the current user has all the given permissions for the
+     * given table
+     * @param tableName Table requested
+     * @throws IOException if obtaining the current user fails
+     * @throws AccessDeniedException if user has no authorization
+     */
+    private void requireAccess(String request, TableName tableName, Action... permissions) throws IOException {
+        User user = getActiveUser();
+        AuthResult result = null;
+        List<Action> requiredAccess = new ArrayList<Action>();
+        for (Action permission : permissions) {
+            if (hasAccess(getUserPermissions(tableName.getNameAsString()), tableName, permission, user)) {
+                result = AuthResult.allow(request, "Table permission granted", user, permission, tableName, null, null);
+            } else {
+                result = AuthResult.deny(request, "Insufficient permissions", user, permission, tableName, null, null);
+                requiredAccess.add(permission);
+            }
+            logResult(result);
+        }
+        if (!requiredAccess.isEmpty()) {
+            result = AuthResult.deny(request, "Insufficient permissions", user, requiredAccess.get(0), tableName, null,
+                    null);
+        }
+        if (!result.isAllowed()) { throw new AccessDeniedException("Insufficient permissions "
+                + authString(user.getName(), tableName, new HashSet<Permission.Action>(Arrays.asList(permissions)))); }
+    }
+
+    /**
+     * Checks if the user has access to the table for the specified action.
+     *
+     * @param perms All table permissions
+     * @param table tablename
+     * @param action the action for which access is required
+     * @return true if the user has access to the table for the specified action, false otherwise
+     */
+    private boolean hasAccess(List<UserPermission> perms, TableName table, Permission.Action action, User user) {
+        if (Superusers.isSuperUser(user)) {
+            return true;
+        }
+        if (perms != null) {
+            List<UserPermission> permissionsForUser = getPermissionForUser(perms, user.getShortName().getBytes());
+            if (permissionsForUser != null) {
+                for (UserPermission permissionForUser : permissionsForUser) {
+                    if (permissionForUser.implies(action)) { return true; }
+                }
+            }
+            String[] groupNames = user.getGroupNames();
+            if (groupNames != null) {
+                for (String group : groupNames) {
+                    List<UserPermission> groupPerms = getPermissionForUser(perms, (AuthUtil.toGroupEntry(group)).getBytes());
+                    if (groupPerms != null) {
+                        for (UserPermission permissionForUser : groupPerms) {
+                            if (permissionForUser.implies(action)) { return true; }
+                        }
+                    }
+                }
+            }
+        } else if (LOG.isDebugEnabled()) {
+            LOG.debug("No permissions found for table=" + table);
+        }
+        return false;
+    }
+
+    private User getActiveUser() throws IOException {
+        User user = RpcServer.getRequestUser();
+        if (user == null) {
+            // for non-rpc handling, fallback to system user
+            user = userProvider.getCurrent();
+        }
+        return user;
+    }
+
+    private void logResult(AuthResult result) {
+        if (AUDITLOG.isTraceEnabled()) {
+            InetAddress remoteAddr = RpcServer.getRemoteAddress();
+            AUDITLOG.trace("Access " + (result.isAllowed() ? "allowed" : "denied") + " for user "
+                    + (result.getUser() != null ? result.getUser().getShortName() : "UNKNOWN") + "; reason: "
+                    + result.getReason() + "; remote address: " + (remoteAddr != null ? remoteAddr : "") + "; request: "
+                    + result.getRequest() + "; context: " + result.toContextString());
+        }
+    }
+
+    private static final class Superusers {
+        private static final Log LOG = LogFactory.getLog(Superusers.class);
+
+        /** Configuration key for superusers (reuses HBase's Superusers.SUPERUSER_CONF_KEY) */
+        public static final String SUPERUSER_CONF_KEY =
+                org.apache.hadoop.hbase.security.Superusers.SUPERUSER_CONF_KEY;
+
+        private static List<String> superUsers;
+        private static List<String> superGroups;
+        private static User systemUser;
+
+        private Superusers(){}
+
+        /**
+         * Should be called only once to pre-load list of super users and super
+         * groups from Configuration. This operation is idempotent.
+         * @param conf configuration to load users from
+         * @throws IOException if unable to initialize lists of superusers or super groups
+         * @throws IllegalStateException if current user is null
+         */
+        public static void initialize(Configuration conf) throws IOException {
+            superUsers = new ArrayList<>();
+            superGroups = new ArrayList<>();
+            systemUser = User.getCurrent();
+
+            if (systemUser == null) {
+                throw new IllegalStateException("Unable to obtain the current user, "
+                    + "authorization checks for internal operations will not work correctly!");
+            }
+
+            if (LOG.isTraceEnabled()) {
+                LOG.trace("Current user name is " + systemUser.getShortName());
+            }
+            String currentUser = systemUser.getShortName();
+            String[] superUserList = conf.getStrings(SUPERUSER_CONF_KEY, new String[0]);
+            for (String name : superUserList) {
+                if (AuthUtil.isGroupPrincipal(name)) {
+                    superGroups.add(AuthUtil.getGroupName(name));
+                } else {
+                    superUsers.add(name);
+                }
+            }
+            superUsers.add(currentUser);
+        }
+
+        /**
+         * @return true if the current user is a superuser (whether as the user running the process,
+         * declared as an individual superuser, or a member of a supergroup), false otherwise.
+         * @param user to check
+         * @throws IllegalStateException if lists of superusers/super groups
+         *   haven't been initialized properly
+         */
+        public static boolean isSuperUser(User user) {
+            if (superUsers == null) {
+                throw new IllegalStateException("Super users/super groups lists"
+                    + " haven't been initialized properly.");
+            }
+            if (superUsers.contains(user.getShortName())) {
+                return true;
+            }
+
+            for (String group : user.getGroupNames()) {
+                if (superGroups.contains(group)) {
+                    return true;
+                }
+            }
+            return false;
+        }
+
+        public static List<String> getSuperUsers() {
+            return superUsers;
+        }
+
+        public static User getSystemUser() {
+            return systemUser;
+        }
+    }
+    
+    public String authString(String user, TableName table, Set<Action> actions) {
+        StringBuilder sb = new StringBuilder();
+        sb.append(" (user=").append(user != null ? user : "UNKNOWN").append(", ");
+        sb.append("scope=").append(table == null ? "GLOBAL" : table.getNameWithNamespaceInclAsString()).append(", ");
+        sb.append(actions.size() > 1 ? "actions=" : "action=").append(actions != null ? actions.toString() : "")
+                .append(")");
+        return sb.toString();
+    }
+
+}
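
For context, the checks above only run when the cluster is configured for them. A minimal sketch of a test configuration wiring everything together follows; the helper class and the choice to enable automatic grant are illustrative assumptions, not part of this commit:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class PhoenixAclConfigSketch {
        public static Configuration withPhoenixAcls() {
            Configuration conf = HBaseConfiguration.create();
            // Enables PhoenixAccessController (QueryServices.PHOENIX_ACLS_ENABLED).
            conf.setBoolean("phoenix.acls.enabled", true);
            // Lets the controller grant missing index/view access itself
            // (QueryServices.PHOENIX_AUTOMATIC_GRANT_ENABLED).
            conf.setBoolean("phoenix.security.automatic.grant.enabled", true);
            // Denies, rather than warns, when required access is missing
            // (QueryServices.PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED).
            conf.setBoolean("phoenix.security.strict.mode.enabled", true);
            // getAccessControllers() only finds controllers that are actually loaded,
            // so HBase's AccessController must be registered on the region servers.
            conf.set("hbase.coprocessor.region.classes",
                    "org.apache.hadoop.hbase.security.access.AccessController");
            return conf;
        }
    }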

http://git-wip-us.apache.org/repos/asf/phoenix/blob/217867c7/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java
new file mode 100644
index 0000000..15b0020
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/PhoenixMetaDataCoprocessorHost.java
@@ -0,0 +1,236 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor;
+
+import java.io.IOException;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.ConcurrentMap;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Coprocessor;
+import org.apache.hadoop.hbase.CoprocessorEnvironment;
+import org.apache.hadoop.hbase.DoNotRetryIOException;
+import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.RegionCoprocessorHost;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.query.QueryServicesOptions;
+import org.apache.phoenix.schema.PIndexState;
+import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTableType;
+
+public class PhoenixMetaDataCoprocessorHost
+        extends CoprocessorHost<PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment> {
+    private RegionCoprocessorEnvironment env;
+    public static final String PHOENIX_META_DATA_COPROCESSOR_CONF_KEY =
+            "hbase.coprocessor.phoenix.classes";
+    public static final String DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY =
+            "org.apache.phoenix.coprocessor.PhoenixAccessController";
+
+    public PhoenixMetaDataCoprocessorHost(RegionCoprocessorEnvironment env) {
+        super(null);
+        this.env = env;
+        this.conf = env.getConfiguration();
+        boolean accessCheckEnabled = this.conf.getBoolean(QueryServices.PHOENIX_ACLS_ENABLED,
+                QueryServicesOptions.DEFAULT_PHOENIX_ACLS_ENABLED);
+        if (this.conf.get(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY) == null && accessCheckEnabled) {
+            this.conf.set(PHOENIX_META_DATA_COPROCESSOR_CONF_KEY, DEFAULT_PHOENIX_META_DATA_COPROCESSOR_CONF_KEY);
+        }
+        loadSystemCoprocessors(conf, PHOENIX_META_DATA_COPROCESSOR_CONF_KEY);
+    }
+
+    private static abstract class CoprocessorOperation<T extends CoprocessorEnvironment> extends ObserverContext<T> {
+        abstract void call(MetaDataEndpointObserver observer, ObserverContext<T> ctx) throws IOException;
+
+        public void postEnvCall(T env) {}
+    }
+
+    private boolean execOperation(
+            final CoprocessorOperation<PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment> ctx)
+            throws IOException {
+        if (ctx == null) return false;
+        boolean bypass = false;
+        for (PhoenixMetaDataControllerEnvironment env : coprocessors) {
+            if (env.getInstance() instanceof MetaDataEndpointObserver) {
+                ctx.prepare(env);
+                Thread currentThread = Thread.currentThread();
+                ClassLoader cl = currentThread.getContextClassLoader();
+                try {
+                    currentThread.setContextClassLoader(env.getClassLoader());
+                    ctx.call((MetaDataEndpointObserver)env.getInstance(), ctx);
+                } catch (Throwable e) {
+                    handleCoprocessorThrowable(env, e);
+                } finally {
+                    currentThread.setContextClassLoader(cl);
+                }
+                bypass |= ctx.shouldBypass();
+                if (ctx.shouldComplete()) {
+                    break;
+                }
+            }
+            ctx.postEnvCall(env);
+        }
+        return bypass;
+    }
+    
+    @Override
+    protected void handleCoprocessorThrowable(final CoprocessorEnvironment env, final Throwable e) throws IOException {
+        if (e instanceof IOException) {
+            if (e.getCause() instanceof DoNotRetryIOException) { throw (IOException)e.getCause(); }
+        }
+        super.handleCoprocessorThrowable(env, e);
+    }
+
+    /**
+     * Encapsulation of the environment of each coprocessor
+     */
+    static class PhoenixMetaDataControllerEnvironment extends CoprocessorHost.Environment
+            implements RegionCoprocessorEnvironment {
+
+        private RegionCoprocessorEnvironment env;
+
+        public PhoenixMetaDataControllerEnvironment(RegionCoprocessorEnvironment env, Coprocessor instance,
+                int priority, int sequence, Configuration conf) {
+            super(instance, priority, sequence, conf);
+            this.env = env;
+        }
+
+        @Override
+        public RegionServerServices getRegionServerServices() {
+            return env.getRegionServerServices();
+        }
+
+        public RegionCoprocessorHost getCoprocessorHost() {
+            return env.getRegion().getCoprocessorHost();
+        }
+
+        @Override
+        public Region getRegion() {
+            return env.getRegion();
+        }
+
+        @Override
+        public HRegionInfo getRegionInfo() {
+            return env.getRegionInfo();
+        }
+
+        @Override
+        public ConcurrentMap<String, Object> getSharedData() {
+            return env.getSharedData();
+        }
+    }
+
+    @Override
+    public PhoenixMetaDataControllerEnvironment createEnvironment(Class<?> implClass, Coprocessor instance,
+            int priority, int sequence, Configuration conf) {
+        return new PhoenixMetaDataControllerEnvironment(env, instance, priority, sequence, conf);
+    }
+
+    public void preGetTable(final String tenantId, final String tableName, final TableName physicalTableName)
+            throws IOException {
+        execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
+            @Override
+            public void call(MetaDataEndpointObserver observer,
+                    ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
+                observer.preGetTable(ctx, tenantId, tableName, physicalTableName);
+            }
+        });
+    }
+
+    public void preCreateTable(final String tenantId, final String tableName, final TableName physicalTableName,
+            final TableName parentPhysicalTableName, final PTableType tableType, final Set<byte[]> familySet,
+            final Set<TableName> indexes) throws IOException {
+        execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
+            @Override
+            public void call(MetaDataEndpointObserver observer,
+                    ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
+                observer.preCreateTable(ctx, tenantId, tableName, physicalTableName, parentPhysicalTableName, tableType,
+                        familySet, indexes);
+            }
+        });
+    }
+
+    public void preDropTable(final String tenantId, final String tableName, final TableName physicalTableName,
+            final TableName parentPhysicalTableName, final PTableType tableType, final List<PTable> indexes) throws IOException {
+        execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
+            @Override
+            public void call(MetaDataEndpointObserver observer,
+                    ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
+                observer.preDropTable(ctx, tenantId, tableName, physicalTableName, parentPhysicalTableName, tableType, indexes);
+            }
+        });
+    }
+
+    public void preAlterTable(final String tenantId, final String tableName, final TableName physicalTableName,
+            final TableName parentPhysicalTableName, final PTableType type) throws IOException {
+        execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
+            @Override
+            public void call(MetaDataEndpointObserver observer,
+                    ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
+                observer.preAlterTable(ctx, tenantId, tableName, physicalTableName, parentPhysicalTableName, type);
+            }
+        });
+    }
+
+    public void preGetSchema(final String schemaName) throws IOException {
+        execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
+            @Override
+            public void call(MetaDataEndpointObserver observer,
+                    ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
+                observer.preGetSchema(ctx, schemaName);
+            }
+        });
+    }
+
+    public void preCreateSchema(final String schemaName) throws IOException {
+        execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
+            @Override
+            public void call(MetaDataEndpointObserver observer,
+                    ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
+                observer.preCreateSchema(ctx, schemaName);
+            }
+        });
+    }
+
+    public void preDropSchema(final String schemaName) throws IOException {
+        execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
+            @Override
+            public void call(MetaDataEndpointObserver observer,
+                    ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
+                observer.preDropSchema(ctx, schemaName);
+            }
+        });
+    }
+
+    public void preIndexUpdate(final String tenantId, final String indexName, final TableName physicalTableName,
+            final TableName parentPhysicalTableName, final PIndexState newState) throws IOException {
+        execOperation(new CoprocessorOperation<PhoenixMetaDataControllerEnvironment>() {
+            @Override
+            public void call(MetaDataEndpointObserver observer,
+                    ObserverContext<PhoenixMetaDataControllerEnvironment> ctx) throws IOException {
+                observer.preIndexUpdate(ctx, tenantId, indexName, physicalTableName, parentPhysicalTableName, newState);
+            }
+        });
+    }
+}
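
The host above loads observers from hbase.coprocessor.phoenix.classes, defaulting to PhoenixAccessController. A sketch of an additional observer plugged in through the same key; the class below is hypothetical, lives in the same package because the environment class is package-private, and assumes BaseMetaDataEndpointObserver provides no-op defaults for the remaining hooks:

    package org.apache.phoenix.coprocessor;

    import java.io.IOException;

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.coprocessor.ObserverContext;
    import org.apache.phoenix.coprocessor.PhoenixMetaDataCoprocessorHost.PhoenixMetaDataControllerEnvironment;

    // Hypothetical observer that audits metadata reads; register it with:
    // conf.set("hbase.coprocessor.phoenix.classes",
    //         "org.apache.phoenix.coprocessor.AuditingMetaDataObserver");
    public class AuditingMetaDataObserver extends BaseMetaDataEndpointObserver {
        @Override
        public void preGetTable(ObserverContext<PhoenixMetaDataControllerEnvironment> ctx,
                String tenantId, String tableName, TableName physicalTableName) throws IOException {
            System.out.println("getTable: tenant=" + tenantId + ", table=" + tableName);
        }
    }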

http://git-wip-us.apache.org/repos/asf/phoenix/blob/217867c7/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
index cc2c6b3..3c69941 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/index/PhoenixIndexFailurePolicy.java
@@ -18,6 +18,7 @@
 package org.apache.phoenix.index;
 
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.sql.SQLException;
 import java.util.Collection;
 import java.util.Collections;
@@ -39,6 +40,7 @@ import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MetaDataMutationResult;
 import org.apache.phoenix.coprocessor.MetaDataProtocol.MutationCode;
@@ -160,12 +162,12 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
     }
 
     private long handleFailureWithExceptions(Multimap<HTableInterfaceReference, Mutation> attempted,
-            Exception cause) throws Throwable {
+            final Exception cause) throws Throwable {
         Set<HTableInterfaceReference> refs = attempted.asMap().keySet();
-        Map<String, Long> indexTableNames = new HashMap<String, Long>(refs.size());
+        final Map<String, Long> indexTableNames = new HashMap<String, Long>(refs.size());
         // start by looking at all the tables to which we attempted to write
         long timestamp = 0;
-        boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure;
+        final boolean leaveIndexActive = blockDataTableWritesOnFailure || !disableIndexOnFailure;
         // if using TrackingParallelWriter, we know which indexes failed and only disable those
         Set<HTableInterfaceReference> failedTables = cause instanceof MultiIndexWriteFailureException
                 ? new HashSet<HTableInterfaceReference>(((MultiIndexWriteFailureException)cause).getFailedTables())
@@ -209,55 +211,66 @@ public class PhoenixIndexFailurePolicy extends DelegateIndexFailurePolicy {
             return timestamp;
         }
 
-        PIndexState newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.PENDING_ACTIVE;
+        final PIndexState newState = disableIndexOnFailure ? PIndexState.DISABLE : PIndexState.PENDING_ACTIVE;
+        final long fTimestamp = timestamp;
         // for all the index tables that we've found, try to disable them and if that fails, try to
-        for (Map.Entry<String, Long> tableTimeElement :indexTableNames.entrySet()){
-            String indexTableName = tableTimeElement.getKey();
-            long minTimeStamp = tableTimeElement.getValue();
-            // We need a way of differentiating the block writes to data table case from
-            // the leave index active case. In either case, we need to know the time stamp
-            // at which writes started failing so we can rebuild from that point. If we
-            // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES,
-            // then writes to the data table will be blocked (this is client side logic
-            // and we can't change this in a minor release). So we use the sign of the
-            // time stamp to differentiate.
-            if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) {
-                minTimeStamp *= -1;
-            }
-            // Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor.
-            try (HTableInterface systemTable = env.getTable(SchemaUtil
-                    .getPhysicalTableName(PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) {
-                MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
-                        systemTable, newState);
-                if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
-                    LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
-                    continue;
-                }
-                if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
-                    if (leaveIndexActive) {
-                        LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
-                                + result.getMutationCode());
-                        // If we're not disabling the index, then we don't want to throw as throwing
-                        // will lead to the RS being shutdown.
-                        if (blockDataTableWritesOnFailure) {
-                            throw new DoNotRetryIOException("Attempt to update INDEX_DISABLE_TIMESTAMP failed.");
+        return User.runAsLoginUser(new PrivilegedExceptionAction<Long>() {
+            @Override
+            public Long run() throws Exception {
+                for (Map.Entry<String, Long> tableTimeElement : indexTableNames.entrySet()) {
+                    String indexTableName = tableTimeElement.getKey();
+                    long minTimeStamp = tableTimeElement.getValue();
+                    // We need a way of differentiating the block writes to data table case from
+                    // the leave index active case. In either case, we need to know the time stamp
+                    // at which writes started failing so we can rebuild from that point. If we
+                    // keep the index active *and* have a positive INDEX_DISABLE_TIMESTAMP_BYTES,
+                    // then writes to the data table will be blocked (this is client side logic
+                    // and we can't change this in a minor release). So we use the sign of the
+                    // time stamp to differentiate.
+                    if (!disableIndexOnFailure && !blockDataTableWritesOnFailure) {
+                        minTimeStamp *= -1;
+                    }
+                    // Disable the index by using the updateIndexState method of MetaDataProtocol end point coprocessor.
+                    try (HTableInterface systemTable = env.getTable(SchemaUtil.getPhysicalTableName(
+                            PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES, env.getConfiguration()))) {
+                        MetaDataMutationResult result = IndexUtil.updateIndexState(indexTableName, minTimeStamp,
+                                systemTable, newState);
+                        if (result.getMutationCode() == MutationCode.TABLE_NOT_FOUND) {
+                            LOG.info("Index " + indexTableName + " has been dropped. Ignore uncommitted mutations");
+                            continue;
+                        }
+                        if (result.getMutationCode() != MutationCode.TABLE_ALREADY_EXISTS) {
+                            if (leaveIndexActive) {
+                                LOG.warn("Attempt to update INDEX_DISABLE_TIMESTAMP " + " failed with code = "
+                                        + result.getMutationCode());
+                                // If we're not disabling the index, then we don't want to throw as throwing
+                                // will lead to the RS being shutdown.
+                                if (blockDataTableWritesOnFailure) { throw new DoNotRetryIOException(
+                                        "Attempt to update INDEX_DISABLE_TIMESTAMP failed."); }
+                            } else {
+                                LOG.warn("Attempt to disable index " + indexTableName + " failed with code = "
+                                        + result.getMutationCode() + ". Will use default failure policy instead.");
+                                throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
+                            }
+                        }
+                        if (leaveIndexActive)
+                            LOG.info("Successfully updated INDEX_DISABLE_TIMESTAMP for " + indexTableName
+                                    + " due to an exception while writing updates.", cause);
+                        else
+                            LOG.info("Successfully disabled index " + indexTableName
+                                    + " due to an exception while writing updates.", cause);
+                    } catch (Throwable t) {
+                        if (t instanceof Exception) {
+                            throw (Exception)t;
+                        } else {
+                            throw new Exception(t);
                         }
-                    } else {
-                        LOG.warn("Attempt to disable index " + indexTableName + " failed with code = "
-                                + result.getMutationCode() + ". Will use default failure policy instead.");
-                        throw new DoNotRetryIOException("Attempt to disable " + indexTableName + " failed.");
-                    }
+                    }
                 }
-                if (leaveIndexActive)
-                    LOG.info("Successfully update INDEX_DISABLE_TIMESTAMP for " + indexTableName + " due to an exception while writing updates.",
-                            cause);
-                else
-                    LOG.info("Successfully disabled index " + indexTableName + " due to an exception while writing updates.",
-                            cause);
+                // Return the cell time stamp (note they should all be the same)
+                return fTimestamp;
             }
-        }
-        // Return the cell time stamp (note they should all be the same)
-        return timestamp;
+        });
     }
 
     private Collection<? extends String> getLocalIndexNames(HTableInterfaceReference ref,

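The core change in this file is that the index-state update now runs as the region server's login user instead of the RPC caller, so it still succeeds when the caller lacks write access to SYSTEM.CATALOG. The wrap/rethrow idiom, isolated as a sketch (the class and method names are illustrative):

    import java.io.IOException;
    import java.security.PrivilegedExceptionAction;

    import org.apache.hadoop.hbase.security.User;

    class RunAsLoginUserSketch {
        static long updateIndexStateAsLoginUser() throws IOException {
            return User.runAsLoginUser(new PrivilegedExceptionAction<Long>() {
                @Override
                public Long run() throws Exception {
                    try {
                        // ... privileged work, e.g. IndexUtil.updateIndexState(...) ...
                        return 0L;
                    } catch (Throwable t) {
                        // run() may only throw Exception, so Errors and other
                        // Throwables are wrapped before rethrowing.
                        if (t instanceof Exception) { throw (Exception) t; }
                        throw new Exception(t);
                    }
                }
            });
        }
    }
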
http://git-wip-us.apache.org/repos/asf/phoenix/blob/217867c7/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
index 532b586..f8f8501 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/ConnectionQueryServicesImpl.java
@@ -867,7 +867,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                     }
                 }
             }
-            if (SchemaUtil.isStatsTable(tableName) && !descriptor.hasCoprocessor(MultiRowMutationEndpoint.class.getName())) {
+            if ((SchemaUtil.isStatsTable(tableName) || SchemaUtil.isMetaTable(tableName))
+                    && !descriptor.hasCoprocessor(MultiRowMutationEndpoint.class.getName())) {
                 descriptor.addCoprocessor(MultiRowMutationEndpoint.class.getName(),
                         null, priority, null);
             }
@@ -2528,7 +2529,8 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
             final TableName mutexTableName = SchemaUtil.getPhysicalTableName(
                 PhoenixDatabaseMetaData.SYSTEM_MUTEX_NAME, props);
             List<TableName> systemTables = getSystemTableNames(admin);
-            if (systemTables.contains(mutexTableName)) {
+            if (systemTables.contains(mutexTableName) || admin.tableExists(TableName.valueOf(
+                    PhoenixDatabaseMetaData.SYSTEM_SCHEMA_NAME, PhoenixDatabaseMetaData.SYSTEM_MUTEX_TABLE_NAME))) {
                 logger.debug("System mutex table already appears to exist, not creating it");
                 return;
             }
@@ -2545,8 +2547,15 @@ public class ConnectionQueryServicesImpl extends DelegateQueryServices implement
                 put.add(PhoenixDatabaseMetaData.SYSTEM_MUTEX_FAMILY_NAME_BYTES, UPGRADE_MUTEX, UPGRADE_MUTEX_UNLOCKED);
                 sysMutexTable.put(put);
             }
-        } catch (TableExistsException e) {
+        } catch (TableExistsException | AccessDeniedException e) {
             // Ignore
+        } catch (PhoenixIOException e) {
+            // An AccessDeniedException may also arrive wrapped in a PhoenixIOException.
+            if (e.getCause() instanceof AccessDeniedException) {
+                // Ignore
+            } else {
+                throw e;
+            }
         }
     }
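
The catch blocks above treat an AccessDeniedException the same whether it is thrown directly or wrapped inside a PhoenixIOException. A hypothetical helper that generalizes the check by walking the whole cause chain (the class and method names are illustrative, not part of this patch):

import org.apache.hadoop.hbase.security.AccessDeniedException;

public final class AccessDeniedUtil {

    private AccessDeniedUtil() {
    }

    // True if the throwable, or anything in its cause chain, is an
    // AccessDeniedException (e.g. one wrapped in a PhoenixIOException).
    public static boolean isAccessDenied(Throwable t) {
        for (Throwable cur = t; cur != null; cur = cur.getCause()) {
            if (cur instanceof AccessDeniedException) {
                return true;
            }
        }
        return false;
    }
}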
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/217867c7/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
----------------------------------------------------------------------
diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java 
b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
index a4a4124..b9ed734 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServices.java
@@ -260,6 +260,10 @@ public interface QueryServices extends SQLCloseable {
     
     //currently BASE64 and ASCII is supported
     public static final String UPLOAD_BINARY_DATA_TYPE_ENCODING = 
"phoenix.upload.binaryDataType.encoding";
+    // Toggle for server-written updates to SYSTEM.CATALOG
+    public static final String PHOENIX_ACLS_ENABLED = "phoenix.acls.enabled";
+    public static final String PHOENIX_AUTOMATIC_GRANT_ENABLED = 
"phoenix.security.automatic.grant.enabled";
+    public static final String PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED 
= "phoenix.security.strict.mode.enabled";
 
     public static final String INDEX_ASYNC_BUILD_ENABLED = 
"phoenix.index.async.build.enabled";
 
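These keys are normally configured in hbase-site.xml on both client and server. The sketch below passes them as client connection Properties purely to illustrate the key names; the ZooKeeper quorum string is a placeholder:

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class PhoenixAclConfigSketch {

    public static Connection connect() throws Exception {
        Properties props = new Properties();
        // Enable Phoenix-level ACL checks; leave automatic grants off and
        // keep strict permission mode at its default of true.
        props.setProperty("phoenix.acls.enabled", "true");
        props.setProperty("phoenix.security.automatic.grant.enabled", "false");
        props.setProperty("phoenix.security.strict.mode.enabled", "true");
        // "localhost:2181" is a placeholder quorum, not from this patch.
        return DriverManager.getConnection("jdbc:phoenix:localhost:2181", props);
    }
}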

http://git-wip-us.apache.org/repos/asf/phoenix/blob/217867c7/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
index af6a054..a586c28 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/query/QueryServicesOptions.java
@@ -58,11 +58,14 @@ import static org.apache.phoenix.query.QueryServices.MAX_TENANT_MEMORY_PERC_ATTR
 import static org.apache.phoenix.query.QueryServices.MIN_STATS_UPDATE_FREQ_MS_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.MUTATE_BATCH_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.NUM_RETRIES_FOR_SCHEMA_UPDATE_CHECK;
+import static org.apache.phoenix.query.QueryServices.PHOENIX_ACLS_ENABLED;
+import static org.apache.phoenix.query.QueryServices.PHOENIX_AUTOMATIC_GRANT_ENABLED;
 import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_CLUSTER_BASE_PATH;
 import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_LOADBALANCER_ENABLED;
 import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_SERVICE_NAME;
 import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_ZK_ACL_PASSWORD;
 import static org.apache.phoenix.query.QueryServices.PHOENIX_QUERY_SERVER_ZK_ACL_USERNAME;
+import static org.apache.phoenix.query.QueryServices.PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED;
 import static org.apache.phoenix.query.QueryServices.QUEUE_SIZE_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.REGIONSERVER_INFO_PORT_ATTRIB;
 import static org.apache.phoenix.query.QueryServices.RENEW_LEASE_ENABLED;
@@ -316,6 +319,11 @@ public class QueryServicesOptions {
     public static final int DEFAULT_CLIENT_CONNECTION_MAX_ALLOWED_CONNECTIONS = 0;
     public static final boolean DEFAULT_STATS_COLLECTION_ENABLED = true;
     public static final boolean DEFAULT_USE_STATS_FOR_PARALLELIZATION = true;
+
+    // Security defaults
+    public static final boolean DEFAULT_PHOENIX_ACLS_ENABLED = false;
+    public static final boolean DEFAULT_PHOENIX_AUTOMATIC_GRANT_ENABLED = false;
+    public static final boolean DEFAULT_PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED = true;
 
     //default update cache frequency
     public static final int DEFAULT_UPDATE_CACHE_FREQUENCY = 0;
@@ -413,7 +421,11 @@ public class QueryServicesOptions {
             .setIfUnset(TRACING_BATCH_SIZE, DEFAULT_TRACING_BATCH_SIZE)
             .setIfUnset(TRACING_THREAD_POOL_SIZE, DEFAULT_TRACING_THREAD_POOL_SIZE)
             .setIfUnset(STATS_COLLECTION_ENABLED, DEFAULT_STATS_COLLECTION_ENABLED)
-            .setIfUnset(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION);
+            .setIfUnset(USE_STATS_FOR_PARALLELIZATION, DEFAULT_USE_STATS_FOR_PARALLELIZATION)
+            .setIfUnset(UPLOAD_BINARY_DATA_TYPE_ENCODING, DEFAULT_UPLOAD_BINARY_DATA_TYPE_ENCODING)
+            .setIfUnset(PHOENIX_ACLS_ENABLED, DEFAULT_PHOENIX_ACLS_ENABLED)
+            .setIfUnset(PHOENIX_AUTOMATIC_GRANT_ENABLED, DEFAULT_PHOENIX_AUTOMATIC_GRANT_ENABLED)
+            .setIfUnset(PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED, DEFAULT_PHOENIX_SECURITY_PERMISSION_STRICT_MODE_ENABLED);
         // HBase sets this to 1, so we reset it to something more appropriate.
         // Hopefully HBase will change this, because we can't know if a user set
         // it to 1, so we'll change it.
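
For readers unfamiliar with the builder chain above: setIfUnset applies a default only when the user has not already configured the key, which is what lets these security switches default safely. A minimal sketch of the same semantics over a plain Hadoop Configuration (the helper class is illustrative):

import org.apache.hadoop.conf.Configuration;

public final class SetIfUnsetSketch {

    private SetIfUnsetSketch() {
    }

    // Apply a boolean default only when the key is absent, mirroring the
    // setIfUnset(...) calls in QueryServicesOptions above.
    static void setIfUnset(Configuration conf, String key, boolean defaultValue) {
        if (conf.get(key) == null) {
            conf.setBoolean(key, defaultValue);
        }
    }

    public static void applySecurityDefaults(Configuration conf) {
        setIfUnset(conf, "phoenix.acls.enabled", false);
        setIfUnset(conf, "phoenix.security.automatic.grant.enabled", false);
        setIfUnset(conf, "phoenix.security.strict.mode.enabled", true);
    }
}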

http://git-wip-us.apache.org/repos/asf/phoenix/blob/217867c7/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
index 3ae3183..8956862 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/stats/StatisticsWriter.java
@@ -25,6 +25,7 @@ import java.io.DataInput;
 import java.io.DataInputStream;
 import java.io.EOFException;
 import java.io.IOException;
+import java.security.PrivilegedExceptionAction;
 import java.sql.Date;
 import java.util.ArrayList;
 import java.util.List;
@@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.Mut
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
 import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;
 import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.security.User;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
@@ -208,23 +210,31 @@ public class StatisticsWriter implements Closeable {
         }
     }
 
-    public void commitStats(List<Mutation> mutations, StatisticsCollector statsCollector) throws IOException {
-        commitLastStatsUpdatedTime(statsCollector);
-        if (mutations.size() > 0) {
-            byte[] row = mutations.get(0).getRow();
-            MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
-            for (Mutation m : mutations) {
-                mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m));
-            }
-            MutateRowsRequest mrm = mrmBuilder.build();
-            CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row);
-            MultiRowMutationService.BlockingInterface service = MultiRowMutationService.newBlockingStub(channel);
-            try {
-                service.mutateRows(null, mrm);
-            } catch (ServiceException ex) {
-                ProtobufUtil.toIOException(ex);
+    public void commitStats(final List<Mutation> mutations, final StatisticsCollector statsCollector)
+            throws IOException {
+        User.runAsLoginUser(new PrivilegedExceptionAction<Void>() {
+            @Override
+            public Void run() throws Exception {
+                commitLastStatsUpdatedTime(statsCollector);
+                if (mutations.size() > 0) {
+                    byte[] row = mutations.get(0).getRow();
+                    MutateRowsRequest.Builder mrmBuilder = MutateRowsRequest.newBuilder();
+                    for (Mutation m : mutations) {
+                        mrmBuilder.addMutationRequest(ProtobufUtil.toMutation(getMutationType(m), m));
+                    }
+                    MutateRowsRequest mrm = mrmBuilder.build();
+                    CoprocessorRpcChannel channel = statsWriterTable.coprocessorService(row);
+                    MultiRowMutationService.BlockingInterface service = MultiRowMutationService
+                            .newBlockingStub(channel);
+                    try {
+                        service.mutateRows(null, mrm);
+                    } catch (ServiceException ex) {
+                        throw ProtobufUtil.toIOException(ex);
+                    }
+                }
+                return null;
             }
-        }
+        });
     }
 
     private Put getLastStatsUpdatedTimePut(long timeStamp) {
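
The action body above is the standard MultiRowMutationEndpoint invocation. A self-contained sketch of the same call sequence, assuming the target table has the endpoint installed, that all rows fall in one region, and (for brevity) that every mutation is a Put; the wrapper class is illustrative:

import java.io.IOException;
import java.util.List;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.ipc.CoprocessorRpcChannel;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto.MutationType;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MultiRowMutationService;
import org.apache.hadoop.hbase.protobuf.generated.MultiRowMutationProtos.MutateRowsRequest;

import com.google.protobuf.ServiceException;

public final class MultiRowMutationSketch {

    private MultiRowMutationSketch() {
    }

    // Apply all mutations atomically via the MultiRowMutationEndpoint.
    public static void mutateAtomically(Table table, List<Mutation> mutations) throws IOException {
        if (mutations.isEmpty()) {
            return;
        }
        MutateRowsRequest.Builder builder = MutateRowsRequest.newBuilder();
        for (Mutation m : mutations) {
            builder.addMutationRequest(ProtobufUtil.toMutation(MutationType.PUT, m));
        }
        CoprocessorRpcChannel channel = table.coprocessorService(mutations.get(0).getRow());
        MultiRowMutationService.BlockingInterface service =
                MultiRowMutationService.newBlockingStub(channel);
        try {
            service.mutateRows(null, builder.build());
        } catch (ServiceException ex) {
            // Convert and rethrow, as the writer above does.
            throw ProtobufUtil.toIOException(ex);
        }
    }
}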

http://git-wip-us.apache.org/repos/asf/phoenix/blob/217867c7/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index 502ef37..2a0c8f0 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -59,6 +59,7 @@ import org.apache.phoenix.schema.PColumn;
 import org.apache.phoenix.schema.PName;
 import org.apache.phoenix.schema.PNameFactory;
 import org.apache.phoenix.schema.PTable;
+import org.apache.phoenix.schema.PTable.IndexType;
 import org.apache.phoenix.schema.PTable.LinkType;
 import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.SequenceKey;
@@ -226,6 +227,16 @@ public class MetaDataUtil {
         }
         return null;
     }
+
+    public static boolean isNameSpaceMapped(List<Mutation> tableMetaData, KeyValueBuilder builder,
+            ImmutableBytesWritable value) {
+        if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData),
+            PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES, builder, value)) {
+            return (boolean) PBoolean.INSTANCE.toObject(ByteUtil.copyKeyBytesIfNecessary(value));
+        }
+        return false;
+    }
+
     
     public static long getParentSequenceNumber(List<Mutation> tableMetaData) {
         return getSequenceNumber(getParentTableHeaderRow(tableMetaData));
@@ -670,4 +681,11 @@ public class MetaDataUtil {
         byte[] physicalTableName = Bytes.toBytes(SchemaUtil.getTableNameFromFullName(view.getPhysicalName().getString()));
         return SchemaUtil.getTableKey(ByteUtil.EMPTY_BYTE_ARRAY, physicalTableSchemaName, physicalTableName);
     }
+
+    public static IndexType getIndexType(List<Mutation> tableMetaData, KeyValueBuilder builder,
+            ImmutableBytesWritable value) {
+        if (getMutationValue(getPutOnlyTableHeaderRow(tableMetaData), PhoenixDatabaseMetaData.INDEX_TYPE_BYTES, builder,
+                value)) { return IndexType.fromSerializedValue(value.get()[value.getOffset()]); }
+        return null;
+    }
 }
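
A hypothetical caller of the new header-row helpers, using Phoenix's GenericKeyValueBuilder; the wrapper class and its purpose are illustrative, not part of this patch:

import java.util.List;

import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.util.MetaDataUtil;

public final class TableHeaderInspectorSketch {

    private TableHeaderInspectorSketch() {
    }

    // Inspect the header row of a metadata mutation batch to see whether it
    // describes a local index.
    public static boolean targetsLocalIndex(List<Mutation> tableMetaData) {
        ImmutableBytesWritable value = new ImmutableBytesWritable();
        IndexType indexType = MetaDataUtil.getIndexType(tableMetaData,
                GenericKeyValueBuilder.INSTANCE, value);
        return indexType == IndexType.LOCAL;
    }
}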

http://git-wip-us.apache.org/repos/asf/phoenix/blob/217867c7/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 51f6ff9..47b4b43 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -20,9 +20,11 @@ package org.apache.phoenix.util;
 import static com.google.common.base.Preconditions.checkArgument;
 import static com.google.common.base.Preconditions.checkNotNull;
 import static com.google.common.base.Strings.isNullOrEmpty;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.IS_NAMESPACE_MAPPED_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG_NAME_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_FUNCTION_NAME_BYTES;
 import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_STATS_NAME_BYTES;
+import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES;
 
 import java.sql.Connection;
 import java.sql.DatabaseMetaData;
@@ -42,8 +44,10 @@ import java.util.TreeSet;
 import javax.annotation.Nullable;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
 import org.apache.hadoop.hbase.util.Bytes;
@@ -76,6 +80,7 @@ import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.SortOrder;
 import org.apache.phoenix.schema.TableProperty;
 import org.apache.phoenix.schema.ValueSchema.Field;
+import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.types.PDataType;
 import org.apache.phoenix.schema.types.PVarbinary;
 import org.apache.phoenix.schema.types.PVarchar;
@@ -1130,4 +1135,11 @@ public class SchemaUtil {
         }
         return false;
     }
+
+    public static boolean isNamespaceMapped(Result currentResult) {
+        Cell isNamespaceMappedCell = currentResult.getColumnLatestCell(TABLE_FAMILY_BYTES, IS_NAMESPACE_MAPPED_BYTES);
+        return isNamespaceMappedCell != null && (boolean) PBoolean.INSTANCE.toObject(isNamespaceMappedCell.getValue());
+    }
+
+
 }
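
Finally, a hypothetical usage of the new SchemaUtil.isNamespaceMapped(Result) helper, fetching a table's header row from SYSTEM.CATALOG first; the class name and row-key parameter are illustrative:

import java.io.IOException;

import org.apache.hadoop.hbase.client.Get;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.Table;
import org.apache.phoenix.util.SchemaUtil;

public final class NamespaceMappingCheckSketch {

    private NamespaceMappingCheckSketch() {
    }

    // Read the table's header row and test the IS_NAMESPACE_MAPPED flag.
    public static boolean isMapped(Table sysCatalog, byte[] headerRowKey) throws IOException {
        Result result = sysCatalog.get(new Get(headerRowKey));
        return !result.isEmpty() && SchemaUtil.isNamespaceMapped(result);
    }
}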
