mymeiyi commented on a change in pull request #163: HBASE-21995 Add a 
coprocessor to set HDFS ACL for hbase granted user
URL: https://github.com/apache/hbase/pull/163#discussion_r292835506
 
 

 ##########
 File path: 
hbase-server/src/main/java/org/apache/hadoop/hbase/security/access/SnapshotScannerHDFSAclHelper.java
 ##########
 @@ -0,0 +1,745 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hbase.security.access;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.apache.hadoop.hbase.security.access.Permission.Action.READ;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Set;
+import java.util.concurrent.CompletableFuture;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.function.Predicate;
+import java.util.stream.Collectors;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclEntry;
+import org.apache.hadoop.fs.permission.AclEntryScope;
+import org.apache.hadoop.fs.permission.FsAction;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hbase.AuthUtil;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.client.Admin;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.ConnectionFactory;
+import org.apache.hadoop.hbase.client.SnapshotDescription;
+import org.apache.hadoop.hbase.mob.MobUtils;
+import org.apache.hadoop.hbase.util.Pair;
+import org.apache.yetus.audience.InterfaceAudience;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.hbase.thirdparty.com.google.common.collect.Lists;
+import org.apache.hbase.thirdparty.com.google.common.collect.Sets;
+import 
org.apache.hbase.thirdparty.com.google.common.util.concurrent.ThreadFactoryBuilder;
+
+/**
+ * A helper to set HBase granted user access acl and default acl over hFiles.
+ */
[email protected]
+public class SnapshotScannerHDFSAclHelper implements Closeable {
+  private static final Logger LOG = 
LoggerFactory.getLogger(SnapshotScannerHDFSAclHelper.class);
+
  /** Conf key: master switch for the "scan snapshot via HDFS ACL" feature. */
  public static final String USER_SCAN_SNAPSHOT_ENABLE = "hbase.user.scan.snapshot.enable";
  /** Conf key: size of the thread pool used to set HDFS ACLs in parallel (default 10). */
  public static final String USER_SCAN_SNAPSHOT_THREAD_NUMBER =
      "hbase.user.scan.snapshot.thread.number";
  // the tmp directory to restore snapshot, it can not be a sub directory of HBase root dir
  public static final String SNAPSHOT_RESTORE_TMP_DIR = "hbase.snapshot.restore.tmp.dir";
  public static final String SNAPSHOT_RESTORE_TMP_DIR_DEFAULT =
      "/hbase/.tmpdir-to-restore-snapshot";
  // If enable this feature, set public directories permission to 751
  public static final String COMMON_DIRECTORY_PERMISSION =
      "hbase.user.scan.snapshot.common.directory.permission";
  public static final String COMMON_DIRECTORY_PERMISSION_DEFAULT = "751";
  // If enable this feature, set restore directory permission to 703
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION =
      "hbase.user.scan.snapshot.restore.directory.permission";
  public static final String SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT = "703";

  // Admin obtained from the (possibly internally created) Connection in the constructor.
  private Admin admin;
  private final Configuration conf;
  // FileSystem resolved via PathHelper in the constructor.
  private FileSystem fs;
  // Resolves the well-known HBase directory layout (root/data/mob/tmp/archive/snapshot dirs).
  private PathHelper pathHelper;
  // Fixed-size daemon-thread pool ("hdfs-acl-thread-%d") for parallel ACL operations.
  private ExecutorService pool;
+
+  public SnapshotScannerHDFSAclHelper(Configuration configuration, Connection 
connection)
+      throws IOException {
+    this.conf = configuration;
+    this.pathHelper = new PathHelper(conf);
+    this.fs = pathHelper.getFileSystem();
+    this.pathHelper = new PathHelper(conf);
+    this.pool = 
Executors.newFixedThreadPool(conf.getInt(USER_SCAN_SNAPSHOT_THREAD_NUMBER, 10),
+      new 
ThreadFactoryBuilder().setNameFormat("hdfs-acl-thread-%d").setDaemon(true).build());
+    if (connection == null) {
+      connection = ConnectionFactory.createConnection(conf);
+    }
+    this.admin = connection.getAdmin();
+  }
+
+  @Override
+  public void close() {
+    if (pool != null) {
+      pool.shutdown();
+    }
+  }
+
+  public void setCommonDirPermission() throws IOException {
+    // Set public directory permission to 751 to make all users have access 
permission.
+    // And we also need the access permission of the parent of HBase root 
directory, but
+    // it's not set here, because the owner of HBase root directory may don't 
own permission
+    // to change it's parent permission to 751.
+    // The {root/.tmp} and {root/.tmp/data} directories are created to make 
global user HDFS
+    // acls can be inherited.
+    Path[] paths = new Path[] { pathHelper.getRootDir(), 
pathHelper.getDataDir(),
+        pathHelper.getMobDir(), pathHelper.getMobDataDir(), 
pathHelper.getTmpDir(),
+        pathHelper.getTmpDataDir(), pathHelper.getArchiveDir(), 
pathHelper.getArchiveDataDir(),
+        pathHelper.getSnapshotRootDir() };
+    for (Path path : paths) {
+      if (!fs.exists(path)) {
+        fs.mkdirs(path);
+      }
+      fs.setPermission(path, new FsPermission(
+          conf.get(COMMON_DIRECTORY_PERMISSION, 
COMMON_DIRECTORY_PERMISSION_DEFAULT)));
+    }
+    // create snapshot restore directory
+    Path restoreDir =
+        new Path(conf.get(SNAPSHOT_RESTORE_TMP_DIR, 
SNAPSHOT_RESTORE_TMP_DIR_DEFAULT));
+    if (!fs.exists(restoreDir)) {
+      fs.mkdirs(restoreDir);
+      fs.setPermission(restoreDir, new 
FsPermission(conf.get(SNAPSHOT_RESTORE_DIRECTORY_PERMISSION,
+        SNAPSHOT_RESTORE_DIRECTORY_PERMISSION_DEFAULT)));
+    }
+  }
+
+  /**
+   * Set acl when grant user permission
+   * @param userPerm the user and permission
+   * @param skipNamespaces the namespace set to skip set acl because already 
set
+   * @param skipTables the table set to skip set acl because already set
+   * @return false if an error occurred, otherwise true
+   */
+  public boolean grantAcl(UserPermission userPerm, Set<String> skipNamespaces,
+      Set<TableName> skipTables) {
+    try {
+      long start = System.currentTimeMillis();
+      setHDFSAcl(getHdfsAclOperations(userPerm, 
HDFSAclOperation.OperationType.MODIFY,
+        skipNamespaces, skipTables));
+      LOG.info("Set HDFS acl when grant {}, cost {} ms", userPerm.toString(),
+        System.currentTimeMillis() - start);
+      return true;
+    } catch (Exception e) {
+      LOG.error("Set HDFS acl error when grant: {}", userPerm != null ? 
userPerm.toString() : null,
 
 Review comment:
   done

----------------------------------------------------------------
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
 
For queries about this service, please contact Infrastructure at:
[email protected]


With regards,
Apache Git Services

Reply via email to