openinx commented on a change in pull request #163: HBASE-21995 Add a 
coprocessor to set HDFS ACL for hbase granted user
URL: https://github.com/apache/hbase/pull/163#discussion_r295109861
 
 

 ##########
 File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclController.java
 ##########
 @@ -0,0 +1,667 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Optional;
+import java.util.Set;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.CellUtil;
+import org.apache.hadoop.hbase.HBaseInterfaceAudience;
+import org.apache.hadoop.hbase.NamespaceDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.TableNotFoundException;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.apache.hadoop.hbase.client.Result;
+import org.apache.hadoop.hbase.client.ResultScanner;
+import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
+import org.apache.hadoop.hbase.coprocessor.CoreCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.HasMasterServices;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessor;
+import org.apache.hadoop.hbase.coprocessor.MasterCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.MasterObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.master.MasterServices;
+import org.apache.hadoop.hbase.security.User;
+import org.apache.hadoop.hbase.security.UserProvider;
+import org.apache.hadoop.hbase.security.access.Permission.Action;
+import 
org.apache.hadoop.hbase.security.access.SnapshotScannerHDFSAclHelper.PathHelper;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+/**
+ * Set HDFS ACLs to hFiles to make HBase granted users have permission to scan 
snapshot
+ * <p>
+ * To use this feature, please make sure HDFS config:
+ * <ul>
+ * <li>dfs.permissions.enabled = true</li>
+ * <li>fs.permissions.umask-mode = 027 (or smaller umask than 027)</li>
+ * </ul>
+ * </p>
+ * <p>
+ * The implementation of this feature is as follows:
+ * <ul>
+ * <li>For common directories such as 'data' and 'archive', set other 
permission to '--x' to make
+ * everyone have the permission to access the directory.</li>
+ * <li>For namespace or table directories such as 'data/ns/table', 
'archive/ns/table' and
+ * '.hbase-snapshot/snapshotName', set user 'r-x' access acl and 'r-x' default 
acl when following
+ * operations happen:
+ * <ul>
+ * <li>grant user with global, namespace or table permission;</li>
+ * <li>revoke user from global, namespace or table;</li>
+ * <li>snapshot table;</li>
+ * <li>truncate table;</li>
+ * </ul>
+ * </li>
+ * <li>Note: Because snapshots are at table level, so this feature just 
considers users with global,
+ * namespace or table permissions, ignores users with table CF or cell 
permissions.</li>
+ * </ul>
+ * </p>
+ */
+@CoreCoprocessor
+@InterfaceAudience.LimitedPrivate(HBaseInterfaceAudience.CONFIG)
+public class SnapshotScannerHDFSAclController implements MasterCoprocessor, 
MasterObserver {
+  private static final Logger LOG = 
LoggerFactory.getLogger(SnapshotScannerHDFSAclController.class);
+
+  private SnapshotScannerHDFSAclHelper hdfsAclHelper = null;
+  private PathHelper pathHelper = null;
+  private FileSystem fs = null;
+  private volatile boolean initialized = false;
+  /** Provider for mapping principal names to Users */
+  private UserProvider userProvider;
+
+  @Override
+  public Optional<MasterObserver> getMasterObserver() {
+    return Optional.of(this);
+  }
+
+  @Override
+  public void 
preMasterInitialization(ObserverContext<MasterCoprocessorEnvironment> c)
+      throws IOException {
+    if (c.getEnvironment().getConfiguration()
+        .getBoolean(SnapshotScannerHDFSAclHelper.USER_SCAN_SNAPSHOT_ENABLE, 
false)) {
+      MasterCoprocessorEnvironment mEnv = c.getEnvironment();
+      if (!(mEnv instanceof HasMasterServices)) {
+        throw new IOException("Does not implement HMasterServices");
+      }
+      MasterServices masterServices = ((HasMasterServices) 
mEnv).getMasterServices();
+      hdfsAclHelper = new 
SnapshotScannerHDFSAclHelper(masterServices.getConfiguration(),
+          masterServices.getConnection());
+      pathHelper = hdfsAclHelper.getPathHelper();
+      fs = pathHelper.getFileSystem();
+      hdfsAclHelper.setCommonDirectoryPermission();
+      initialized = true;
+      userProvider = 
UserProvider.instantiate(c.getEnvironment().getConfiguration());
+    } else {
+      LOG.warn(
+        "Load SnapshotScannerHDFSAclController but 
hbase.user.scan.snapshot.enable is false");
+    }
+  }
+
+  @Override
+  public void postStartMaster(ObserverContext<MasterCoprocessorEnvironment> c) 
throws IOException {
+    if (checkInitialized()) {
+      try (Admin admin = c.getEnvironment().getConnection().getAdmin()) {
+        if (admin.tableExists(PermissionStorage.ACL_TABLE_NAME)) {
+          // Check if hbase acl table has 'm' CF, if not, add 'm' CF
+          TableDescriptor tableDescriptor = 
admin.getDescriptor(PermissionStorage.ACL_TABLE_NAME);
+          boolean containHdfsAclFamily =
+              
Arrays.stream(tableDescriptor.getColumnFamilies()).anyMatch(family -> Bytes
+                  .equals(family.getName(), 
SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY));
+          if (!containHdfsAclFamily) {
+            TableDescriptorBuilder builder = 
TableDescriptorBuilder.newBuilder(tableDescriptor)
+                .setColumnFamily(ColumnFamilyDescriptorBuilder
+                    
.newBuilder(SnapshotScannerHDFSAclStorage.HDFS_ACL_FAMILY).build());
+            admin.modifyTable(builder.build());
+          }
+        } else {
+          LOG.error(
+            "Table {} is not created yet. Please check if {} is configured 
after "
+                + "the {} Coprocessor",
+            PermissionStorage.ACL_TABLE_NAME, getClass().getName(),
+            AccessController.class.getName());
+          throw new TableNotFoundException(
+              "Table " + PermissionStorage.ACL_TABLE_NAME + " is not yet 
created");
+        }
+      }
+    }
+  }
+
+  @Override
+  public void preStopMaster(ObserverContext<MasterCoprocessorEnvironment> c) {
+    if (checkInitialized()) {
+      hdfsAclHelper.close();
+    }
+  }
+
+  @Override
+  public void 
postCompletedCreateTableAction(ObserverContext<MasterCoprocessorEnvironment> c,
+      TableDescriptor desc, RegionInfo[] regions) throws IOException {
+    if (!desc.getTableName().isSystemTable() && checkInitialized()) {
+      TableName tableName = desc.getTableName();
+      Path[] paths =
+          new Path[] { pathHelper.getTmpTableDir(tableName), 
pathHelper.getDataTableDir(tableName),
+              pathHelper.getMobTableDir(tableName), 
pathHelper.getArchiveTableDir(tableName) };
+      for (Path path : paths) {
+        if (!fs.exists(path)) {
+          fs.mkdirs(path);
+        }
+      }
+      // Add table owner HDFS acls
+      String owner =
+          desc.getOwnerString() == null ? getActiveUser(c).getShortName() : 
desc.getOwnerString();
+      hdfsAclHelper.addTableAcl(desc.getTableName(), owner);
+      try (Table aclTable =
+          
c.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME)) {
+        SnapshotScannerHDFSAclStorage.addUserTableHdfsAcl(aclTable, owner, 
desc.getTableName());
+      }
+    }
+  }
+
+  @Override
+  public void 
postCreateNamespace(ObserverContext<MasterCoprocessorEnvironment> c,
+      NamespaceDescriptor ns) throws IOException {
+    if (checkInitialized()) {
+      Path[] nsDirs = new Path[] { pathHelper.getTmpNsDir(ns.getName()),
+          pathHelper.getArchiveNsDir(ns.getName()), 
pathHelper.getMobDataNsDir(ns.getName()) };
+      for (Path nsDir : nsDirs) {
+        if (!fs.exists(nsDir)) {
+          fs.mkdirs(nsDir);
+        }
+      }
+    }
+  }
+
+  @Override
+  public void 
postCompletedSnapshotAction(ObserverContext<MasterCoprocessorEnvironment> c,
+      SnapshotDescription snapshot, TableDescriptor tableDescriptor) throws 
IOException {
+    if (!tableDescriptor.getTableName().isSystemTable() && checkInitialized()) 
{
+      hdfsAclHelper.snapshotAcl(snapshot);
+    }
+  }
+
+  @Override
+  public void 
postCompletedTruncateTableAction(ObserverContext<MasterCoprocessorEnvironment> 
c,
+      TableName tableName) throws IOException {
+    if (!tableName.isSystemTable() && checkInitialized()) {
+      hdfsAclHelper.resetTableAcl(tableName);
+    }
+  }
+
+  @Override
+  public void postDeleteTable(ObserverContext<MasterCoprocessorEnvironment> 
ctx,
+      TableName tableName) throws IOException {
+    if (!tableName.isSystemTable() && checkInitialized()) {
+      // remove table user HDFS acl from ns data directory if the user has no 
global/the ns/other
+      // tables of the ns permissions
+      Set<String> removeUsers = new HashSet<>();
+      try (Table aclTable =
+          
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME))
 {
+        List<String> users = 
SnapshotScannerHDFSAclStorage.getTableUsers(aclTable, tableName);
+        SnapshotScannerHDFSAclStorage.deleteTableHdfsAcl(aclTable, tableName);
+        for (String user : users) {
+          List<byte[]> userEntries = 
SnapshotScannerHDFSAclStorage.getUserEntries(aclTable, user);
+          boolean remove = true;
+          for (byte[] entry : userEntries) {
+            if (PermissionStorage.isGlobalEntry(entry)) {
+              remove = false;
+              break;
+            } else if (PermissionStorage.isNamespaceEntry(entry) && Bytes
+                .equals(PermissionStorage.fromNamespaceEntry(entry), 
tableName.getNamespace())) {
+              remove = false;
+              break;
+            } else if (Bytes.equals(TableName.valueOf(entry).getNamespace(),
+              tableName.getNamespace())) {
+              remove = false;
+              break;
+            }
+          }
+          if (remove) {
+            removeUsers.add(user);
+          }
+        }
+      }
+      if (removeUsers.size() > 0) {
+        hdfsAclHelper.removeNamespaceAcl(tableName, removeUsers);
+      }
+    }
+  }
+
+  @Override
+  public void 
postDeleteNamespace(ObserverContext<MasterCoprocessorEnvironment> ctx,
+      String namespace) throws IOException {
+    if (checkInitialized()) {
+      try (Table aclTable =
+          
ctx.getEnvironment().getConnection().getTable(PermissionStorage.ACL_TABLE_NAME))
 {
+        SnapshotScannerHDFSAclStorage.deleteNamespaceHdfsAcl(aclTable, 
namespace);
+      }
+      Path tmpNsDir = pathHelper.getTmpNsDir(namespace);
 
 Review comment:
   Why do we keep a tmp/ns directory here? In my mind, after the namespace is
deleted, the mob/ns, data/ns, and tmp/ns directories should all be removed.
I'm just confused about why tmp/ns is handled separately.

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
us...@infra.apache.org


With regards,
Apache Git Services

Reply via email to