openinx commented on a change in pull request #163: HBASE-21995 Add a coprocessor to set HDFS ACL for hbase granted user
URL: https://github.com/apache/hbase/pull/163#discussion_r296147072
 
 

 ##########
 File path: hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
 ##########
 @@ -0,0 +1,737 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.apache.hadoop.hbase.security.access.Permission.Action.READ;
+
+import java.io.Closeable;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.client.TableDescriptor;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+import org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * A helper to modify or remove the default and access HDFS ACLs of HBase granted users over
+ * hFiles.
+ */
+@InterfaceAudience.Private
+public class SnapshotScannerHDFSAclHelper implements Closeable {
+  private static final Logger LOG = LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);
+
+  public static final String USER_SCAN_SNAPSHOT_ENABLE = "hbase.user.scan.snapshot.enable";
+  public static final String USER_SCAN_SNAPSHOT_THREAD_NUMBER =
+      "hbase.user.scan.snapshot.thread.number";
+  // The tmp directory used to restore snapshots; it cannot be a subdirectory of the HBase root dir
+  public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
+  public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
+      "/hbase/.tmpdir-to-restore-snapshot";
+  // The default permission of the common directories if the feature is enabled.
+  public static final String COMMON_DIRECTORY_PERMISSION =
+      "hbase.user.scan.snapshot.common.directory.permission";
+  // The secure HBase permission is 700; 751 means all others have execute access, and the mask
+  // is set to read-execute so that the extended access ACL entries can take effect. Be cautious
+  // when setting this value.
+  public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
+  // The default permission of the snapshot restore directories if the feature is enabled.
+  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
+      "hbase.user.scan.snapshot.restore.directory.permission";
+  // 753 means all others have write-execute access.
+  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "753";
+
+  private Admin admin;
+  private final Configuration conf;
+  private FileSystem fs;
+  private PathHelper pathHelper;
+  private ExecutorService pool;
+
+  public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection connection)
+      throws IOException {
+    this.conf = configuration;
+    this.pathHelper = new PathHelper(conf);
+    this.fs = pathHelper.getFileSystem();
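+    // A fixed thread pool used to apply HDFS ACL operations on many directories concurrently;
+    // its size comes from hbase.user.scan.snapshot.thread.number (default 10).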
+    this.pool = Executors.newFixedThreadPool(conf.getInt(USER_SCAN_SNAPSHOT_THREAD_NUMBER, 10),
+      new ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
+    this.admin = connection.getAdmin();
+  }
+
+  @Override
+  public void close() {
+    if (pool != null) {
+      pool.shutdown();
+    }
+    admin.close();
+  }
+
+  public void setCommonDirectoryPermission() throws IOException {
+    // Set the public directory permission to 751 so that all users have access permission.
+    // We also need the access permission on the parent of the HBase root directory, but it is
+    // not set here, because the owner of the HBase root directory may not have permission to
+    // change its parent's permission to 751.
+    // The {root/.tmp} and {root/.tmp/data} directories are created so that global user HDFS
+    // ACLs can be inherited.
+    List<Path> paths = Lists.newArrayList(pathHelper.getRootDir(), pathHelper.getMobDir(),
+      pathHelper.getTmpDir(), pathHelper.getArchiveDir());
+    paths.addAll(getGlobalRootPaths());
+    for (Path path : paths) {
+      if (!fs.exists(path)) {
+        fs.mkdirs(path);
+      }
+      fs.setPermission(path, new FsPermission(
+          conf.get(COMMON_DIRECTORY_PERMISSION, COMMON_DIRECTORY_PERMISSION_DEFAULT)));
+    }
+    // create snapshot restore directory
+    Path restoreDir =
+        new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, SNAPSHOT_RESTORE_TMP_DIR_DEFAULT));
+    if (!fs.exists(restoreDir)) {
+      fs.mkdirs(restoreDir);
+      fs.setPermission(restoreDir, new FsPermission(conf.get(SNAPSHOT_RESTORE_DIRECTORY_PERMISSION,
+        SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT)));
+    }
+  }
+
+  /**
+   * Set ACLs when a user permission is granted.
+   * @param userPermission the user and permission
+   * @param skipNamespaces the namespaces to skip because their ACLs are already set
+   * @param skipTables the tables to skip because their ACLs are already set
+   * @return false if an error occurred, otherwise true
+   */
+  public boolean grantAcl(UserPermission userPermission, Set<String> skipNamespaces,
+      Set<TableName> skipTables) {
+    try {
+      long start = System.currentTimeMillis();
+      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.MODIFY, skipNamespaces,
+        skipTables);
+      LOG.info("Set HDFS ACLs when granting {}, cost {} ms", userPermission,
+        System.currentTimeMillis() - start);
+      return true;
+    } catch (Exception e) {
+      LOG.error("Set HDFS acl error when grant: {}", userPermission, e);
+      return false;
+    }
+  }
+
+  /**
+   * Remove ACLs when a user permission is granted or revoked.
+   * @param userPermission the user and permission
+   * @param skipNamespaces the namespaces to skip removing ACLs from
+   * @param skipTables the tables to skip removing ACLs from
+   * @return false if an error occurred, otherwise true
+   */
+  public boolean revokeAcl(UserPermission userPermission, Set<String> skipNamespaces,
+      Set<TableName> skipTables) {
+    try {
+      long start = System.currentTimeMillis();
+      handleGrantOrRevokeAcl(userPermission, HDFSAclOperation.OperationType.REMOVE, skipNamespaces,
+        skipTables);
+      LOG.info("Removed HDFS ACLs when revoking {}, cost {} ms", userPermission,
+        System.currentTimeMillis() - start);
+      return true;
+    } catch (Exception e) {
+      LOG.error("Set HDFS acl error when revoke: {}", userPermission, e);
+      return false;
+    }
+  }
+
+  /**
+   * Set ACLs when taking a snapshot.
+   * @param snapshot the snapshot description
+   * @return false if an error occurred, otherwise true
+   */
+  public boolean snapshotAcl(SnapshotDescription snapshot) {
+    try {
+      long start = System.currentTimeMillis();
+      TableName tableName = snapshot.getTableName();
+      // global user permissions are inherited from the default ACLs automatically
+      Set<String> userSet = getUsersWithTableReadAction(tableName);
+      userSet.addAll(getUsersWithNamespaceReadAction(tableName.getNamespaceAsString()));
+      Path path = pathHelper.getSnapshotDir(snapshot.getName());
+      handleHDFSAcl(new HDFSAclOperation(fs, path, userSet, HDFSAclOperation.OperationType.MODIFY,
+          true, HDFSAclOperation.AclType.DEFAULT_AND_ACCESS)).get();
+      LOG.info("Set HDFS ACLs for snapshot {}, cost {} ms", snapshot.getName(),
+        System.currentTimeMillis() - start);
+      return true;
+    } catch (Exception e) {
+      LOG.error("Set HDFS acl error when snapshot {}", snapshot, e);
+      return false;
+    }
+  }
+
+  /**
+   * Reset ACLs when a table is truncated.
+   * @param tableName the specific table
+   * @return false if an error occurred, otherwise true
+   */
+  public boolean resetTableAcl(TableName tableName) {
+    try {
+      long start = System.currentTimeMillis();
+      // global and namespace user permissions are inherited from the default ACLs automatically
+      setTableAcl(tableName, getUsersWithTableReadAction(tableName));
+      LOG.info("Set HDFS ACLs when truncating {}, cost {} ms", tableName,
+        System.currentTimeMillis() - start);
+      return true;
+    } catch (Exception e) {
+      LOG.error("Set HDFS acl error when truncate {}", tableName, e);
+      return false;
+    }
+  }
+
+  /**
+   * Remove the table access ACL from the namespace directories when a table is deleted.
+   * @param tableName the table
+   * @param removeUsers the users whose access ACLs will be removed
+   * @return false if an error occurred, otherwise true
+   */
+  public boolean removeNamespaceAcl(TableName tableName, Set<String> removeUsers) {
+    try {
+      long start = System.currentTimeMillis();
+      List<AclEntry> aclEntries = removeUsers.stream()
+          .map(removeUser -> aclEntry(ACCESS, removeUser)).collect(Collectors.toList());
+      String namespace = tableName.getNamespaceAsString();
+      List<Path> nsPaths = Lists.newArrayList(pathHelper.getTmpNsDir(namespace),
+        pathHelper.getDataNsDir(namespace), pathHelper.getMobDataNsDir(namespace));
+      // If the table has no snapshots, remove the archive namespace HDFS ACLs; otherwise keep
+      // the archive namespace ACLs so that the snapshots can still be scanned. In the second
+      // case, the archive namespace ACLs need to be removed once all snapshots of the deleted
+      // table are deleted (will be done in later work).
+      if (getTableSnapshotPaths(tableName).size() == 0) {
+        nsPaths.add(pathHelper.getArchiveNsDir(namespace));
+      }
+      for (Path nsPath : nsPaths) {
+        fs.removeAclEntries(nsPath, aclEntries);
+      }
+      LOG.info("Remove HDFS acl when delete table {}, cost {} ms", tableName,
+        System.currentTimeMillis() - start);
+      return true;
+    } catch (Exception e) {
+      LOG.error("Set HDFS acl error when delete table {}", tableName, e);
+      return false;
+    }
+  }
+
+  /**
+   * Set the table owner's ACLs when a table is created.
+   * @param tableName the table
+   * @param user the table owner
+   * @return false if an error occurred, otherwise true
+   */
+  public boolean addTableAcl(TableName tableName, String user) {
+    try {
+      long start = System.currentTimeMillis();
+      setTableAcl(tableName, Sets.newHashSet(user));
+      LOG.info("Set HDFS acl when create table {}, cost {} ms", tableName,
+        System.currentTimeMillis() - start);
+      return true;
+    } catch (Exception e) {
+      LOG.error("Set HDFS acl error when create table {}", tableName, e);
+      return false;
+    }
+  }
+
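+  /**
+   * Dispatch a grant/revoke ACL operation by permission scope: a global permission touches all
+   * namespaces and tables, a namespace permission touches one namespace and its tables, and a
+   * table permission touches the table directories plus the access ACL of its namespace
+   * directories.
+   */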
+  private void handleGrantOrRevokeAcl(UserPermission userPermission,
+      HDFSAclOperation.OperationType operationType, Set<String> skipNamespaces,
+      Set<TableName> skipTables) throws ExecutionException, InterruptedException, IOException {
+    Set<String> users = Sets.newHashSet(userPermission.getUser());
+    switch (userPermission.getAccessScope()) {
+      case GLOBAL:
+        handleGlobalAcl(users, skipNamespaces, skipTables, operationType);
+        break;
+      case NAMESPACE:
+        NamespacePermission namespacePermission =
+            (NamespacePermission) userPermission.getPermission();
+        handleNamespaceAcl(Sets.newHashSet(namespacePermission.getNamespace()), users,
+          skipNamespaces, skipTables, operationType);
+        break;
+      case TABLE:
+        TablePermission tablePermission = (TablePermission) userPermission.getPermission();
+        handleNamespaceAccessAcl(tablePermission.getNamespace(), users, operationType);
+        handleTableAcl(Sets.newHashSet(tablePermission.getTableName()), users, skipNamespaces,
+          skipTables, operationType);
+        break;
+      default:
+        throw new IllegalArgumentException(
+            "Illegal user permission scope " + userPermission.getAccessScope());
+    }
+  }
+
+  private void handleGlobalAcl(Set<String> users, Set<String> skipNamespaces,
+      Set<TableName> skipTables, HDFSAclOperation.OperationType operationType)
+      throws ExecutionException, InterruptedException, IOException {
+    // handle global root directories HDFS acls
+    List<HDFSAclOperation> hdfsAclOperations = getGlobalRootPaths().stream()
+        .map(path -> new HDFSAclOperation(fs, path, users, operationType, false,
+            HDFSAclOperation.AclType.DEFAULT_AND_ACCESS))
+        .collect(Collectors.toList());
+    handleHDFSAclParallel(hdfsAclOperations).get();
+    // handle namespace HDFS acls
+    handleNamespaceAcl(Sets.newHashSet(admin.listNamespaces()), users, skipNamespaces, skipTables,
+      operationType);
+  }
+
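+  /**
+   * Apply the ACL operation to the root directories of every namespace that is not skipped,
+   * then handle the table directories under those namespaces.
+   */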
+  private void handleNamespaceAcl(Set<String> namespaces, Set<String> users,
+      Set<String> skipNamespaces, Set<TableName> skipTables,
+      HDFSAclOperation.OperationType operationType)
+      throws ExecutionException, InterruptedException, IOException {
+    Set<String> filterNamespaces =
+        namespaces.stream().filter(ns -> !skipNamespaces.contains(ns)).collect(Collectors.toSet());
+    // handle namespace root directories HDFS acls
+    List<HDFSAclOperation> hdfsAclOperations = new ArrayList<>();
+    Set<String> skipTableNamespaces =
+        skipTables.stream().map(TableName::getNamespaceAsString).collect(Collectors.toSet());
+    for (String ns : filterNamespaces) {
+      /**
+       * When op is REMOVE, remove the DEFAULT namespace ACL while keeping the ACCESS ACL for
+       * skipTables; otherwise remove both the DEFAULT and ACCESS ACLs. When op is MODIFY, just
+       * operate on the DEFAULT and ACCESS ACLs.
+       */
+      HDFSAclOperation.OperationType op = operationType;
+      HDFSAclOperation.AclType aclType = HDFSAclOperation.AclType.DEFAULT_AND_ACCESS;
+      if (operationType == HDFSAclOperation.OperationType.REMOVE
+          && skipTableNamespaces.contains(ns)) {
+        // remove namespace directories default HDFS acls for skip tables
+        op = HDFSAclOperation.OperationType.REMOVE;
+        aclType = HDFSAclOperation.AclType.DEFAULT;
+      }
+      for (Path path : getNamespaceRootPaths(ns)) {
+        hdfsAclOperations.add(new HDFSAclOperation(fs, path, users, op, false, aclType));
+      }
+    }
+    handleHDFSAclParallel(hdfsAclOperations).get();
+    // handle table directories HDFS acls
+    Set<TableName> tables = new HashSet<>();
+    for (String namespace : filterNamespaces) {
+      tables.addAll(admin.listTableDescriptorsByNamespace(Bytes.toBytes(namespace)).stream()
+          .map(TableDescriptor::getTableName).collect(Collectors.toSet()));
+    }
+    handleTableAcl(tables, users, skipNamespaces, skipTables, operationType);
+  }
+
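+  /**
+   * Apply default and access ACLs recursively over the root directories of every table that is
+   * not skipped; each table's paths are handled sequentially, and tables are handled in parallel.
+   */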
+  private void handleTableAcl(Set<TableName> tableNames, Set<String> users,
+      Set<String> skipNamespaces, Set<TableName> skipTables,
+      HDFSAclOperation.OperationType operationType)
+      throws ExecutionException, InterruptedException, IOException {
+    Set<TableName> filterTableNames = new HashSet<>();
+    for (TableName tableName : tableNames) {
+      if (!skipTables.contains(tableName)
+          && !skipNamespaces.contains(tableName.getNamespaceAsString())) {
+        filterTableNames.add(tableName);
+      }
+    }
+    List<CompletableFuture<Void>> futures = new ArrayList<>();
+    // handle table HDFS acls
+    for (TableName tableName : filterTableNames) {
+      List<HDFSAclOperation> hdfsAclOperations = getTableRootPaths(tableName, true).stream()
+          .map(path -> new HDFSAclOperation(fs, path, users, operationType, true,
+              HDFSAclOperation.AclType.DEFAULT_AND_ACCESS))
+          .collect(Collectors.toList());
+      CompletableFuture<Void> future = handleHDFSAclSequential(hdfsAclOperations);
+      futures.add(future);
+    }
+    CompletableFuture<Void> future =
+        CompletableFuture.allOf(futures.toArray(new CompletableFuture[futures.size()]));
+    future.get();
+  }
+
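+  /**
+   * Set or remove only the access ACL (not the default ACL) on the namespace root directories,
+   * so that users granted at table scope can traverse the namespace directories.
+   */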
+  private void handleNamespaceAccessAcl(String namespace, Set<String> users,
+      HDFSAclOperation.OperationType operationType)
+      throws ExecutionException, InterruptedException {
+    // handle namespace access HDFS acls
+    List<HDFSAclOperation> hdfsAclOperations =
+        getNamespaceRootPaths(namespace).stream().map(path -> new HDFSAclOperation(fs, path, users,
+            operationType, false, HDFSAclOperation.AclType.ACCESS)).collect(Collectors.toList());
+    CompletableFuture<Void> future = handleHDFSAclParallel(hdfsAclOperations);
+    future.get();
+  }
+
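+  /**
+   * Add both the namespace access ACL and the table default and access ACLs for the given users
+   * on a single table.
+   */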
+  private void setTableAcl(TableName tableName, Set<String> users)
+      throws ExecutionException, InterruptedException, IOException {
+    HDFSAclOperation.OperationType operationType = HDFSAclOperation.OperationType.MODIFY;
+    handleNamespaceAccessAcl(tableName.getNamespaceAsString(), users, operationType);
+    handleTableAcl(Sets.newHashSet(tableName), users, new HashSet<>(0), new HashSet<>(0),
+      operationType);
+  }
+
+  /**
+   * Return the paths that a user with global permission will visit.
+   * @return the path list
+   */
+  private List<Path> getGlobalRootPaths() {
+    ArrayList<Path> paths = Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
 
 Review comment:
   Just use the following:
   
   ```
   return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
         pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
   ```
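   
   For reference, the whole method would then read roughly like this (a sketch only, assuming the method keeps the signature shown in the diff above and the `pathHelper` getters already used in this class):
   
   ```
   private List<Path> getGlobalRootPaths() {
     return Lists.newArrayList(pathHelper.getTmpDataDir(), pathHelper.getDataDir(),
         pathHelper.getMobDataDir(), pathHelper.getArchiveDataDir(), pathHelper.getSnapshotRootDir());
   }
   ```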
   

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
users@infra.apache.org


With regards,
Apache Git Services
